Dataset schema (one table row per source file):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
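Each row below pairs one source file (the content column) with its repository metadata and the per-file quality signals listed above. A minimal sketch of how rows with this schema could be loaded and filtered with pandas; the Parquet file name is hypothetical and stands in for however the records are actually stored:

```python
import pandas as pd

# Hypothetical export of rows like the ones shown below.
df = pd.read_parquet("code_quality_signals.parquet")

# Keep Python files that look like real code: low duplicated-ngram mass
# and a healthy fraction of alphabetic characters.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5)
    & (df["qsc_code_frac_chars_alphabet_quality_signal"] > 0.5)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "alphanum_fraction"]])
```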
Row 1: hexsha 98e51e3b6e591a28a70e5b4254db5295ab8b6bfa, size 15,889, ext py, lang Python
max_stars / max_issues / max_forks: repo_path coba/tests/test_registry.py, repo_name mrucker/banditbenchmark, head_hexsha 0365291b3a0cf1d862d294e0386d0ccad3f360f1, licenses ["BSD-3-Clause"]; all counts and event datetimes null
content:
```python
import unittest

from coba.exceptions import CobaException
from coba.registry import CobaRegistry, coba_registry_class, JsonMakerV1, JsonMakerV2
from coba.environments import OpenmlSimulation

class TestObject:
    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs

class TestArgObject:
    def __init__(self, arg):
        self.arg = arg

class TestOptionalArgObject:
    def __init__(self, arg=1):
        self.arg = arg

class CobaRegistry_Tests(unittest.TestCase):

    def setUp(self) -> None:
        CobaRegistry.clear()  # make sure the registry is fresh each test

    def test_endpoint_loaded(self):
        obj = CobaRegistry.registry["Null"]
        self.assertEqual("NullSink", obj.__name__)

    def test_endpoint_loaded_after_decorator_register(self):
        @coba_registry_class("MyTestObject")
        class MyTestObject(TestObject): pass

        obj = CobaRegistry.registry["Null"]
        self.assertEqual("NullSink", obj.__name__)

    def test_register_decorator(self):
        @coba_registry_class("MyTestObject")
        class MyTestObject(TestObject): pass

        self.assertIn("MyTestObject", CobaRegistry.registry)

class JsonMakerV1_Tests(unittest.TestCase):

    def test_make(self):
        obj = JsonMakerV1({"test": TestObject}).make("test")
        self.assertIsInstance(obj, TestObject)
        self.assertEqual(obj.args, ())
        self.assertEqual(obj.kwargs, {})

    def test_make_args1(self):
        obj = JsonMakerV1({"test": TestObject}).make({ "test": [1,2,3] })
        self.assertIsInstance(obj, TestObject)
        self.assertEqual(obj.args, (1,2,3))
        self.assertEqual(obj.kwargs, {})

    def test_make_args2(self):
        obj = JsonMakerV1({"test": TestObject}).make({ "test": 1 })
        self.assertIsInstance(obj, TestObject)
        self.assertEqual(obj.args, (1,))
        self.assertEqual(obj.kwargs, {})

    def test_make_kwargs(self):
        obj = JsonMakerV1({"test": TestObject}).make({ "test": {"a":1} })
        self.assertIsInstance(obj, TestObject)
        self.assertEqual(obj.args, ())
        self.assertEqual(obj.kwargs, {"a":1})

    def test_make_args3(self):
        obj = JsonMakerV1({"test": TestObject}).make({ "test": "abc" })
        self.assertIsInstance(obj, TestObject)
        self.assertEqual(obj.args, ("abc",))
        self.assertEqual(obj.kwargs, {})

    def test_make_args_kwargs(self):
        obj = JsonMakerV1({"test": TestObject}).make({ "test": [1,2,3], "kwargs": {"a":1} })
        self.assertIsInstance(obj, TestObject)
        self.assertEqual(obj.args, (1,2,3))
        self.assertEqual(obj.kwargs, {"a":1})

    def test_make_name_args_kwargs(self):
        obj = JsonMakerV1({"test": TestObject}).make({ "name": "test", "args": [1,2,3], "kwargs": {"a":1} })
        self.assertIsInstance(obj, TestObject)
        self.assertEqual(obj.args, (1,2,3))
        self.assertEqual(obj.kwargs, {"a":1})

    def test_make_foreach1(self):
        recipe = { "test":[[1,2,3]], "kwargs": {"a":1}, "method":"foreach" }
        objs = JsonMakerV1({"test": TestObject}).make(recipe)
        self.assertEqual(len(objs), 1)
        self.assertEqual(objs[0].args, (1,2,3))
        self.assertEqual(objs[0].kwargs, {"a":1})

    def test_make_foreach2(self):
        recipe = { "test":[1,2,3], "kwargs": {"a":1}, "method":"foreach" }
        objs = JsonMakerV1({"test": TestObject}).make(recipe)
        self.assertEqual(len(objs), 3)
        self.assertEqual(objs[0].args, (1,))
        self.assertEqual(objs[0].kwargs, {"a":1})
        self.assertEqual(objs[1].args, (2,))
        self.assertEqual(objs[1].kwargs, {"a":1})
        self.assertEqual(objs[2].args, (3,))
        self.assertEqual(objs[2].kwargs, {"a":1})

    def test_make_foreach3(self):
        recipe = { "test":[1,2], "kwargs": [{"a":1},{"a":2}], "method":"foreach" }
        objs = JsonMakerV1({"test": TestObject}).make(recipe)
        self.assertEqual(len(objs), 2)
        self.assertEqual(objs[0].args, (1,))
        self.assertEqual(objs[0].kwargs, {"a":1})
        self.assertEqual(objs[1].args, (2,))
        self.assertEqual(objs[1].kwargs, {"a":2})

    def test_make_foreach4(self):
        recipe = { "test":[[1,2],3], "method":"foreach" }
        objs = JsonMakerV1({"test": TestObject}).make(recipe)
        self.assertEqual(len(objs), 2)
        self.assertEqual(objs[0].args, (1,2))
        self.assertEqual(objs[0].kwargs, {})
        self.assertEqual(objs[1].args, (3,))
        self.assertEqual(objs[1].kwargs, {})

    def test_make_recursive1(self):
        obj = JsonMakerV1({"test": TestObject}).make({ "test": "test" })
        self.assertEqual(1, len(obj.args))
        self.assertEqual(obj.kwargs, {})
        self.assertIsInstance(obj.args[0], TestObject)
        self.assertEqual(obj.args[0].args, ())
        self.assertEqual(obj.args[0].kwargs, {})

    def test_make_recursive2(self):
        obj = JsonMakerV1({"test": TestObject}).make({ "test": {"test":1} })
        self.assertEqual(1, len(obj.args))
        self.assertEqual(obj.kwargs, {})
        self.assertIsInstance(obj.args[0], TestObject)
        self.assertEqual(obj.args[0].args, (1,))
        self.assertEqual(obj.args[0].kwargs, {})

    def test_make_recursive3(self):
        obj = JsonMakerV1({"test": TestObject}).make({ "test": {"a": "test"} })
        self.assertEqual(obj.args, ())
        self.assertEqual(1, len(obj.kwargs))
        self.assertIsInstance(obj.kwargs["a"], TestObject)
        self.assertEqual(obj.kwargs["a"].args, ())
        self.assertEqual(obj.kwargs["a"].kwargs, {})

    def test_make_array_arg(self):
        obj = JsonMakerV1({"test": TestArgObject}).make({ "test": [1,2,3] })
        self.assertEqual(obj.arg, [1,2,3])

    def test_make_dict_arg(self):
        with self.assertRaises(Exception):
            JsonMakerV1({"test": TestArgObject}).make({ "test": {"a":1} })

    def test_make_optionalarray_arg(self):
        obj = JsonMakerV1({"test": TestOptionalArgObject}).make({ "test": [1,2,3] })
        self.assertEqual(obj.arg, [1,2,3])

    def test_not_registered(self):
        with self.assertRaises(Exception) as cm:
            JsonMakerV1({"test": TestObject}).make("test2")
        self.assertEqual("Unknown recipe test2", str(cm.exception))

    def test_invalid_recipe1(self):
        recipe = {"test":[1,2,3], "args":[4,5,6] }
        with self.assertRaises(Exception) as cm:
            JsonMakerV1({"test": TestObject}).make(recipe)
        self.assertEqual(f"Invalid recipe {str(recipe)}", str(cm.exception))

    def test_invalid_recipe2(self):
        recipe = {"test":[1,2,3], "name":"test", "args":[4,5,6]}
        with self.assertRaises(Exception) as cm:
            JsonMakerV1({"test": TestObject}).make(recipe)
        self.assertEqual(f"Invalid recipe {str(recipe)}", str(cm.exception))

    def test_invalid_recipe3(self):
        recipe = {"test":{"a":1}, "name":"test", "kwargs":{"a":1}}
        with self.assertRaises(Exception) as cm:
            JsonMakerV1({"test": TestObject}).make(recipe)
        self.assertEqual(f"Invalid recipe {str(recipe)}", str(cm.exception))

    def test_invalid_recipe4(self):
        recipe = 1
        with self.assertRaises(Exception) as cm:
            JsonMakerV1({"test": TestObject}).make(recipe)
        self.assertEqual(f"Invalid recipe {str(recipe)}", str(cm.exception))

    def test_make_optionalarray_arg(self):
        obj = JsonMakerV1({"test": TestOptionalArgObject}).make({ "test": [1,2,3] })
        self.assertEqual(obj.arg, [1,2,3])

class JsonMakerV2_Tests(unittest.TestCase):

    def test_registed_make_no_args_no_kwargs(self):
        obj = JsonMakerV2({"test": TestObject}).make("test")
        self.assertIsInstance(obj, TestObject)
        self.assertEqual(obj.args, ())
        self.assertEqual(obj.kwargs, {})

    def test_make_args1(self):
        obj = JsonMakerV2({"test": TestObject}).make({ "test": [1,2,3] })
        self.assertIsInstance(obj, TestObject)
        self.assertEqual(obj.args, (1,2,3))
        self.assertEqual(obj.kwargs, {})

    def test_make_args2(self):
        obj = JsonMakerV2({"test": TestObject}).make({ "test": 1 })
        self.assertIsInstance(obj, TestObject)
        self.assertEqual(obj.args, (1,))
        self.assertEqual(obj.kwargs, {})

    def test_make_args3(self):
        obj = JsonMakerV1({"test": TestObject}).make({ "test": "abc" })
        self.assertIsInstance(obj, TestObject)
        self.assertEqual(obj.args, ("abc",))
        self.assertEqual(obj.kwargs, {})

    def test_make_kwargs(self):
        obj = JsonMakerV2({"test": TestObject}).make({ "test": {"a":1} })
        self.assertIsInstance(obj, TestObject)
        self.assertEqual(obj.args, ())
        self.assertEqual(obj.kwargs, {"a":1})

    def test_make_args_kwargs(self):
        obj = JsonMakerV2({"test": TestObject}).make({ "test": [1,2,3,'**',{"a":1}] })
        self.assertIsInstance(obj, TestObject)
        self.assertEqual(obj.args, (1,2,3))
        self.assertEqual(obj.kwargs, {"a":1})

    def test_make_for_no_args_no_kwargs(self):
        objs = JsonMakerV2({"test": TestObject}).make({ "test":[], "for":[1,2] })
        self.assertEqual(2, len(objs))
        self.assertIsInstance(objs[0], TestObject)
        self.assertEqual(objs[0].args, ())
        self.assertEqual(objs[0].kwargs, {})
        self.assertIsInstance(objs[1], TestObject)
        self.assertEqual(objs[1].args, ())
        self.assertEqual(objs[1].kwargs, {})

    def test_make_array_arg(self):
        obj = JsonMakerV2({"test": TestArgObject}).make({ "test": [[1,2,3]] })
        self.assertEqual(obj.arg, [1,2,3])

    def test_make_dict_arg(self):
        with self.assertRaises(Exception):
            JsonMakerV2({"test": TestArgObject}).make({ "test": {"a":1} })

    def test_make_default_arg1(self):
        obj = JsonMakerV2({"test": TestOptionalArgObject}).make({ "test": [[1,2,3]] })
        self.assertEqual(obj.arg, [1,2,3])

    def test_make_default_arg2(self):
        obj = JsonMakerV2({"test": TestOptionalArgObject}).make({ "test": [] })
        self.assertEqual(obj.arg, 1)

    def test_make_for_arg(self):
        objs = JsonMakerV2({"test": TestObject}).make({ "test":"$", "for":[1,2] })
        self.assertEqual(2, len(objs))
        self.assertIsInstance(objs[0], TestObject)
        self.assertEqual(objs[0].args, (1,))
        self.assertEqual(objs[0].kwargs, {})
        self.assertIsInstance(objs[1], TestObject)
        self.assertEqual(objs[1].args, (2,))
        self.assertEqual(objs[1].kwargs, {})

    def test_make_for_args(self):
        objs = JsonMakerV2({"test": TestObject}).make({ "test":["$",9], "for":[1,2] })
        self.assertEqual(2, len(objs))
        self.assertIsInstance(objs[0], TestObject)
        self.assertEqual(objs[0].args, (1,9))
        self.assertEqual(objs[0].kwargs, {})
        self.assertIsInstance(objs[1], TestObject)
        self.assertEqual(objs[1].args, (2,9))
        self.assertEqual(objs[1].kwargs, {})

    def test_make_for_zip(self):
        objs = JsonMakerV2({"test": TestObject, "zip":zip}).make({ "test":"$", "for":{"zip":[[1,2],[3,4]] }})
        self.assertEqual(2, len(objs))
        self.assertIsInstance(objs[0], TestObject)
        self.assertEqual(objs[0].args, ((1,3),))
        self.assertEqual(objs[0].kwargs, {})
        self.assertIsInstance(objs[1], TestObject)
        self.assertEqual(objs[1].args, ((2,4),))
        self.assertEqual(objs[1].kwargs, {})

    def test_make_for_kwargs(self):
        objs = JsonMakerV2({"test": TestObject}).make({ "test":{"a":"$",'b':3} , "for":[1,2] })
        self.assertEqual(2, len(objs))
        self.assertIsInstance(objs[0], TestObject)
        self.assertEqual(objs[0].args, ())
        self.assertEqual(objs[0].kwargs, {'a':1,'b':3})
        self.assertIsInstance(objs[1], TestObject)
        self.assertEqual(objs[1].args, ())
        self.assertEqual(objs[1].kwargs, {'a':2,'b':3})

    def test_make_recursive1(self):
        obj = JsonMakerV2({"test": TestObject}).make({ "test": "test" })
        self.assertEqual(1, len(obj.args))
        self.assertEqual(obj.kwargs, {})
        self.assertIsInstance(obj.args[0], TestObject)
        self.assertEqual(obj.args[0].args, ())
        self.assertEqual(obj.args[0].kwargs, {})

    def test_make_recursive2(self):
        obj = JsonMakerV2({"test": TestObject}).make({ "test": [{"test":1}] })
        self.assertEqual(1, len(obj.args))
        self.assertEqual(obj.kwargs, {})
        self.assertIsInstance(obj.args[0], TestObject)
        self.assertEqual(obj.args[0].args, (1,))
        self.assertEqual(obj.args[0].kwargs, {})

    def test_make_recursive3(self):
        obj = JsonMakerV2({"test": TestObject}).make({ "test": {"a": "test"} })
        self.assertEqual(obj.args, ())
        self.assertEqual(1, len(obj.kwargs))
        self.assertIsInstance(obj.kwargs["a"], TestObject)
        self.assertEqual(obj.kwargs["a"].args, ())
        self.assertEqual(obj.kwargs["a"].kwargs, {})

    def test_make_unmakeable(self):
        recipe = 1
        with self.assertRaises(CobaException) as e:
            JsonMakerV2({"test": TestObject}).make(recipe)
        self.assertEqual(f"We were unable to make {recipe}.", str(e.exception))

class JsonMakerV2Regression_Tests(unittest.TestCase):

    def test_openmlsimulation_for_interface_consistency(self):
        sim = JsonMakerV2(CobaRegistry.registry).make({"OpenmlSimulation":1})
        self.assertIsInstance(sim, OpenmlSimulation)
        self.assertEqual(sim.params['openml_data'], 1)
        self.assertEqual(sim.params['cat_as_str'], False)
        self.assertEqual(sim.params['drop_missing'], True)
        self.assertNotIn('reservoir_take', sim.params)

        sim = JsonMakerV2(CobaRegistry.registry).make({"OpenmlSimulation":[1,True]})
        self.assertIsInstance(sim, OpenmlSimulation)
        self.assertEqual(sim.params['openml_data'], 1)
        self.assertEqual(sim.params['cat_as_str'], True)
        self.assertEqual(sim.params['drop_missing'], True)
        self.assertNotIn('reservoir_take', sim.params)

        sim = JsonMakerV2(CobaRegistry.registry).make({"OpenmlSimulation":[1,True,False]})
        self.assertIsInstance(sim, OpenmlSimulation)
        self.assertEqual(sim.params['openml_data'], 1)
        self.assertEqual(sim.params['cat_as_str'], True)
        self.assertEqual(sim.params['drop_missing'], False)
        self.assertNotIn('reservoir_take', sim.params)

        sim = JsonMakerV2(CobaRegistry.registry).make({"OpenmlSimulation":[1,True,False,100]})
        self.assertIsInstance(sim, OpenmlSimulation)
        self.assertEqual(sim.params['openml_data'], 1)
        self.assertEqual(sim.params['cat_as_str'], True)
        self.assertEqual(sim.params['drop_missing'], False)
        self.assertEqual(sim.params['reservoir_count'], 100)

        sim = JsonMakerV2(CobaRegistry.registry).make({"OpenmlSimulation":{"data_id":1,"cat_as_str":True,"drop_missing":False,"take":100}})
        self.assertIsInstance(sim, OpenmlSimulation)
        self.assertEqual(sim.params['openml_data'], 1)
        self.assertEqual(sim.params['cat_as_str'], True)
        self.assertEqual(sim.params['drop_missing'], False)
        self.assertEqual(sim.params['reservoir_count'], 100)

        sim = JsonMakerV2(CobaRegistry.registry).make({"OpenmlSimulation":{"task_id":1,"cat_as_str":True,"drop_missing":False,"take":100}})
        self.assertIsInstance(sim, OpenmlSimulation)
        self.assertEqual(sim.params['openml_task'], 1)
        self.assertEqual(sim.params['cat_as_str'], True)
        self.assertEqual(sim.params['drop_missing'], False)
        self.assertEqual(sim.params['reservoir_count'], 100)

if __name__ == '__main__':
    unittest.main()
```
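The V1 recipe grammar these tests exercise can be summarized in a few calls; this sketch is distilled only from the cases above:

```python
maker = JsonMakerV1({"test": TestObject})

maker.make("test")                                   # registered name, no args
maker.make({"test": [1, 2, 3]})                      # positional args
maker.make({"test": {"a": 1}})                       # kwargs only
maker.make({"test": [1, 2, 3], "kwargs": {"a": 1}})  # args plus shared kwargs
maker.make({"test": [1, 2], "method": "foreach"})    # one object per list element
```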
avg_line_length 34.768053, max_line_length 139, alphanum_fraction 0.621751
qsc_*_quality_signal values (schema order, qsc_code_num_words_quality_signal through qsc_codepython_frac_lines_print_quality_signal):
1,877 | 15,889 | 5.158231 | 0.067661 | 0.202954 | 0.092956 | 0.052262 | 0.89465 | 0.864904 | 0.840735 | 0.811093 | 0.772258 | 0.742615 | 0 | 0.026707 | 0.203474 | 15,889 | 457 | 140 | 34.768053 | 0.738306 | 0.00258 | 0 | 0.647249 | 0 | 0 | 0.078306 | 0 | 0 | 0 | 0 | 0 | 0.572816 | 1 | 0.161812 | false | 0.006472 | 0.012945 | 0 | 0.203884 | 0
qsc_* values (same signal order, qsc_code_num_words through qsc_codepython_frac_lines_print):
0 | 0 | 0 | null | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective 0, hits 8
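The simple surface statistics in these blocks are easy to relate back to content. A sketch, assuming avg_line_length is characters per line and alphanum_fraction is the share of alphanumeric characters; the pipeline's exact definitions are an assumption here:

```python
def surface_stats(content: str) -> dict:
    """Recompute a few simple per-file statistics from a row's `content`."""
    lines = content.splitlines()
    n_chars = len(content)
    return {
        "size": n_chars,
        "num_lines": len(lines),
        "avg_line_length": n_chars / max(len(lines), 1),
        "max_line_length": max((len(line) for line in lines), default=0),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / max(n_chars, 1),
    }

# For row 1 this should land near the reported values:
# size 15,889, 457 lines, avg_line_length ~34.77, alphanum_fraction ~0.62.
```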
Row 2: hexsha c71ce6d15b74ebcd536dd4e21985c0b8a6561e03, size 122, ext py, lang Python
max_stars / max_issues / max_forks: repo_path SmerekaRoman/CodeWars/CW 9.3.py, repo_name kolyasalubov/Lv-639.pythonCore, head_hexsha 06f10669a188318884adb00723127465ebdf2907, licenses ["MIT"]; all counts and event datetimes null
content:
```python
class Human:
    pass

class Man(Human):
    pass

class Woman(Human):
    pass

def God():
    return [Man(), Woman()]
```
avg_line_length 12.2, max_line_length 27, alphanum_fraction 0.57377
qsc_*_quality_signal values (schema order):
16 | 122 | 4.375 | 0.5 | 0.385714 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.286885 | 122 | 10 | 28 | 12.2 | 0.804598 | 0 | 0 | 0.375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | true | 0.375 | 0 | 0.125 | 0.625 | 0
qsc_* values (same order):
1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 0
effective 0, hits 7
Row 3: hexsha c75cac7709edaa53a7176d3be61952f0afab6631, size 8,880, ext py, lang Python
max_stars / max_issues / max_forks: repo_path trankit/__init__.py, repo_name jsteggink/trankit, head_hexsha 61ef593999bfa29751990d0d4bcf259daed05db4, licenses ["Apache-2.0"]
max_stars_count 613 (2021-01-12T14:21:13.000Z to 2022-03-29T19:51:47.000Z); max_issues_count 38 (2021-01-13T12:01:15.000Z to 2022-03-31T14:13:44.000Z); max_forks_count 77 (2021-01-13T07:33:26.000Z to 2022-03-29T19:51:50.000Z)
content:
```python
from .pipeline import Pipeline
from .tpipeline import TPipeline
from .pipeline import supported_langs, langwithner, remove_with_path
from .utils.base_utils import download, trankit2conllu
from .utils.tbinfo import supported_embeddings, supported_langs, saved_model_version
import os
from shutil import copyfile

__version__ = "1.1.0"

def download_missing_files(category, save_dir, embedding_name, language):
    assert language in supported_langs, '{} is not a pretrained language. Current pretrained languages: {}'.format(language, supported_langs)
    assert embedding_name in supported_embeddings, '{} has not been supported. Current supported embeddings: {}'.format(embedding_name, supported_embeddings)

    import os

    assert category in {'customized', 'customized-ner', 'customized-mwt',
                        'customized-mwt-ner'}, "Pipeline category must be one of the following: 'customized', 'customized-ner', 'customized-mwt', 'customized-mwt-ner'"

    if category == 'customized':
        file_list = [
            ('{}.tokenizer.mdl', os.path.join(save_dir, embedding_name, category, '{}.tokenizer.mdl'.format(category))),
            ('{}.tagger.mdl', os.path.join(save_dir, embedding_name, category, '{}.tagger.mdl'.format(category))),
            ('{}.vocabs.json', os.path.join(save_dir, embedding_name, category, '{}.vocabs.json'.format(category))),
            ('{}_lemmatizer.pt', os.path.join(save_dir, embedding_name, category, '{}_lemmatizer.pt'.format(category)))
        ]
    elif category == 'customized-ner':
        file_list = [
            ('{}.tokenizer.mdl', os.path.join(save_dir, embedding_name, category, '{}.tokenizer.mdl'.format(category))),
            ('{}.tagger.mdl', os.path.join(save_dir, embedding_name, category, '{}.tagger.mdl'.format(category))),
            ('{}.vocabs.json', os.path.join(save_dir, embedding_name, category, '{}.vocabs.json'.format(category))),
            ('{}_lemmatizer.pt', os.path.join(save_dir, embedding_name, category, '{}_lemmatizer.pt'.format(category))),
            ('{}.ner.mdl', os.path.join(save_dir, embedding_name, category, '{}.ner.mdl'.format(category))),
            ('{}.ner-vocab.json', os.path.join(save_dir, embedding_name, category, '{}.ner-vocab.json'.format(category)))
        ]
    elif category == 'customized-mwt':
        file_list = [
            ('{}.tokenizer.mdl', os.path.join(save_dir, embedding_name, category, '{}.tokenizer.mdl'.format(category))),
            ('{}_mwt_expander.pt', os.path.join(save_dir, embedding_name, category, '{}_mwt_expander.pt'.format(category))),
            ('{}.tagger.mdl', os.path.join(save_dir, embedding_name, category, '{}.tagger.mdl'.format(category))),
            ('{}.vocabs.json', os.path.join(save_dir, embedding_name, category, '{}.vocabs.json'.format(category))),
            ('{}_lemmatizer.pt', os.path.join(save_dir, embedding_name, category, '{}_lemmatizer.pt'.format(category)))
        ]
    elif category == 'customized-mwt-ner':
        file_list = [
            ('{}.tokenizer.mdl', os.path.join(save_dir, embedding_name, category, '{}.tokenizer.mdl'.format(category))),
            ('{}_mwt_expander.pt', os.path.join(save_dir, embedding_name, category, '{}_mwt_expander.pt'.format(category))),
            ('{}.tagger.mdl', os.path.join(save_dir, embedding_name, category, '{}.tagger.mdl'.format(category))),
            ('{}.vocabs.json', os.path.join(save_dir, embedding_name, category, '{}.vocabs.json'.format(category))),
            ('{}_lemmatizer.pt', os.path.join(save_dir, embedding_name, category, '{}_lemmatizer.pt'.format(category))),
            ('{}.ner.mdl', os.path.join(save_dir, embedding_name, category, '{}.ner.mdl'.format(category))),
            ('{}.ner-vocab.json', os.path.join(save_dir, embedding_name, category, '{}.ner-vocab.json'.format(category)))
        ]
    else:
        assert 'Unknown customized lang!'

    missing_filenamess = []
    for filename, filepath in file_list:
        if not os.path.exists(filepath):
            print('Missing {}'.format(filepath))
            missing_filenamess.append(filename)

    download(
        cache_dir=save_dir,
        language=language,
        saved_model_version=saved_model_version,  # manually set this to avoid duplicated storage
        embedding_name=embedding_name
    )

    # borrow pretrained files
    src_dir = os.path.join(save_dir, embedding_name, language)
    tgt_dir = os.path.join(save_dir, embedding_name, category)
    for fname in missing_filenamess:
        copyfile(os.path.join(src_dir, fname.format(language)), os.path.join(tgt_dir, fname.format(category)))
        print('Copying {} to {}'.format(
            os.path.join(src_dir, fname.format(language)),
            os.path.join(tgt_dir, fname.format(category))
        ))
    remove_with_path(src_dir)

def verify_customized_pipeline(category, save_dir, embedding_name):
    assert embedding_name in supported_embeddings, '{} has not been supported. Current supported embeddings: {}'.format(
        embedding_name, supported_embeddings)
    assert category in {'customized', 'customized-ner', 'customized-mwt',
                        'customized-mwt-ner'}, "Pipeline category must be one of the following: 'customized', 'customized-ner', 'customized-mwt', 'customized-mwt-ner'"
    if category == 'customized':
        file_list = [
            os.path.join(save_dir, embedding_name, category, '{}.tokenizer.mdl'.format(category)),
            os.path.join(save_dir, embedding_name, category, '{}.tagger.mdl'.format(category)),
            os.path.join(save_dir, embedding_name, category, '{}.vocabs.json'.format(category)),
            os.path.join(save_dir, embedding_name, category, '{}_lemmatizer.pt'.format(category))
        ]
    elif category == 'customized-ner':
        file_list = [
            os.path.join(save_dir, embedding_name, category, '{}.tokenizer.mdl'.format(category)),
            os.path.join(save_dir, embedding_name, category, '{}.tagger.mdl'.format(category)),
            os.path.join(save_dir, embedding_name, category, '{}.vocabs.json'.format(category)),
            os.path.join(save_dir, embedding_name, category, '{}_lemmatizer.pt'.format(category)),
            os.path.join(save_dir, embedding_name, category, '{}.ner.mdl'.format(category)),
            os.path.join(save_dir, embedding_name, category, '{}.ner-vocab.json'.format(category))
        ]
    elif category == 'customized-mwt':
        file_list = [
            os.path.join(save_dir, embedding_name, category, '{}.tokenizer.mdl'.format(category)),
            os.path.join(save_dir, embedding_name, category, '{}_mwt_expander.pt'.format(category)),
            os.path.join(save_dir, embedding_name, category, '{}.tagger.mdl'.format(category)),
            os.path.join(save_dir, embedding_name, category, '{}.vocabs.json'.format(category)),
            os.path.join(save_dir, embedding_name, category, '{}_lemmatizer.pt'.format(category))
        ]
    elif category == 'customized-mwt-ner':
        file_list = [
            os.path.join(save_dir, embedding_name, category, '{}.tokenizer.mdl'.format(category)),
            os.path.join(save_dir, embedding_name, category, '{}_mwt_expander.pt'.format(category)),
            os.path.join(save_dir, embedding_name, category, '{}.tagger.mdl'.format(category)),
            os.path.join(save_dir, embedding_name, category, '{}.vocabs.json'.format(category)),
            os.path.join(save_dir, embedding_name, category, '{}_lemmatizer.pt'.format(category)),
            os.path.join(save_dir, embedding_name, category, '{}.ner.mdl'.format(category)),
            os.path.join(save_dir, embedding_name, category, '{}.ner-vocab.json'.format(category))
        ]
    else:
        assert 'Unknown customized lang!'

    verified = True
    for filepath in file_list:
        if not os.path.exists(filepath):
            verified = False
            print('Missing {}'.format(filepath))

    if verified:
        with open(os.path.join(save_dir, embedding_name, category, '{}.downloaded'.format(category)), 'w') as f:
            f.write('')
        remove_with_path(os.path.join(save_dir, embedding_name, category, 'train.txt.character'))
        remove_with_path(os.path.join(save_dir, embedding_name, category, 'logs'))
        remove_with_path(os.path.join(save_dir, embedding_name, category, 'preds'))
        print(
            "Customized pipeline is ready to use!\nIt can be initialized as follows:\n-----------------------------------\nfrom trankit import Pipeline\np = Pipeline(lang='{}', cache_dir='{}')".format(
                category, save_dir))
    else:
        print('Customized pipeline is not ready to use!\nPlease consider the missing files above.')
```
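A hypothetical invocation of the two helpers above; the cache directory is illustrative, and 'xlm-roberta-base' and 'english' are assumptions about entries in trankit's supported_embeddings and supported_langs:

```python
# Illustrative arguments only; they must name a supported embedding/language.
download_missing_files(
    category='customized-ner',
    save_dir='./cache',
    embedding_name='xlm-roberta-base',
    language='english',
)
verify_customized_pipeline(
    category='customized-ner',
    save_dir='./cache',
    embedding_name='xlm-roberta-base',
)
```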
avg_line_length 64.817518, max_line_length 202, alphanum_fraction 0.646059
qsc_*_quality_signal values (schema order):
1,050 | 8,880 | 5.280952 | 0.112381 | 0.135978 | 0.097385 | 0.187556 | 0.801082 | 0.788097 | 0.788097 | 0.788097 | 0.767899 | 0.767899 | 0 | 0.000561 | 0.197185 | 8,880 | 136 | 203 | 65.294118 | 0.777248 | 0.00777 | 0 | 0.590551 | 0 | 0.023622 | 0.232126 | 0.005881 | 0 | 0 | 0 | 0 | 0.055118 | 1 | 0.015748 | false | 0 | 0.070866 | 0 | 0.086614 | 0.03937
qsc_* values (same order):
0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective 0, hits 7
Row 4: hexsha c7635f97d7359baca25c54da050966f0d3063841, size 6,002, ext py, lang Python
max_stars / max_issues / max_forks: repo_path e2e/tests/logging/test_splunk_logging.py, repo_name dhewapim/identity-service-api, head_hexsha 6688a295f1175f176c3c50f8d8362dce4bb504a6, licenses ["MIT"]
max_stars_count null; max_issues_count 233 (2020-04-02T15:50:10.000Z to 2022-01-04T10:53:45.000Z); max_forks_count 3 (2021-04-11T07:31:43.000Z to 2022-01-24T11:18:50.000Z)
content:
```python
import pytest
import random
import json

from api_test_utils.apigee_api_trace import ApigeeApiTraceDebug
from e2e.scripts import config

@pytest.mark.asyncio
class TestSplunkLoggingFields:
    @staticmethod
    async def _get_payload_from_splunk(debug):
        splunk_content_json = await debug.get_apigee_variable_from_trace(name='splunkCalloutRequest.content')
        return json.loads(splunk_content_json)

    @pytest.mark.happy_path
    @pytest.mark.logging
    async def test_splunk_fields_for_authorize_endpoint_for_cis2(self):
        debug = ApigeeApiTraceDebug(proxy=config.SERVICE_NAME)
        await debug.start_trace()
        await self.oauth.hit_oauth_endpoint(
            method="GET",
            endpoint="authorize",
            params={
                "client_id": self.oauth.client_id,
                "redirect_uri": self.oauth.redirect_uri,
                "response_type": "code",
                "state": random.getrandbits(32),
            },
        )
        payload = await self._get_payload_from_splunk(debug)

        # Then
        auth = payload["auth"]
        auth_meta = auth["meta"]

        assert auth_meta["auth_type"] == "user"
        assert auth_meta["grant_type"] == "authorization_code"
        assert auth_meta["level"] == ""  # level is unknown when hitting /authorize
        assert auth_meta["provider"] == "nhs-cis2"

        auth_user = auth["user"]
        assert auth_user["user_id"] == ""  # user_id is unknown when hitting /authorize

    @pytest.mark.happy_path
    @pytest.mark.logging
    async def test_splunk_fields_for_callback_endpoint_for_cis2(self, helper):
        # Given
        response = await self.oauth.hit_oauth_endpoint(
            method="GET",
            endpoint="authorize",
            params={
                "client_id": self.oauth.client_id,
                "redirect_uri": self.oauth.redirect_uri,
                "response_type": "code",
                "state": "1234567890",
            },
            allow_redirects=False,
        )
        state = helper.get_param_from_url(
            url=response["headers"]["Location"], param="state"
        )

        # Make simulated auth request to authenticate
        response = await self.oauth.hit_oauth_endpoint(
            base_uri=config.MOCK_IDP_BASE_URL,
            method="POST",
            endpoint="simulated_auth",
            params={
                "response_type": "code",
                "client_id": self.oauth.client_id,
                "redirect_uri": self.oauth.redirect_uri,
                "scope": "openid",
                "state": state,
            },
            headers={"Content-Type": "application/x-www-form-urlencoded"},
            data={"state": state},
            allow_redirects=False,
        )

        # Make initial callback request
        auth_code = helper.get_param_from_url(
            url=response["headers"]["Location"], param="code"
        )

        # When
        debug = ApigeeApiTraceDebug(proxy=config.SERVICE_NAME)
        await debug.start_trace()
        await self.oauth.hit_oauth_endpoint(
            method="GET",
            endpoint="callback",
            params={"code": auth_code, "client_id": "some-client-id", "state": state},
            allow_redirects=False,
        )
        payload = await self._get_payload_from_splunk(debug)

        # Then
        auth = payload["auth"]
        auth_meta = auth["meta"]

        assert auth_meta["auth_type"] == "user"
        assert auth_meta["grant_type"] == "authorization_code"
        assert auth_meta["level"] == "aal3"
        assert auth_meta["provider"] == "nhs-cis2"

        auth_user = auth["user"]
        assert auth_user["user_id"] == "787807429511"

    @pytest.mark.happy_path
    @pytest.mark.logging
    async def test_splunk_fields_for_callback_endpoint_for_nhs_login(self, helper):
        # Given
        response = await self.oauth.hit_oauth_endpoint(
            method="GET",
            endpoint="authorize",
            params={
                "client_id": self.oauth.client_id,
                "redirect_uri": self.oauth.redirect_uri,
                "response_type": "code",
                "state": "1234567890",
                "scope": "nhs-login",
            },
            allow_redirects=False,
        )
        state = helper.get_param_from_url(
            url=response["headers"]["Location"], param="state"
        )

        # Make simulated auth request to authenticate
        response = await self.oauth.hit_oauth_endpoint(
            base_uri=config.MOCK_IDP_BASE_URL,
            method="POST",
            endpoint="simulated_auth",
            params={
                "response_type": "code",
                "client_id": self.oauth.client_id,
                "redirect_uri": self.oauth.redirect_uri,
                "scope": "openid",
                "state": state,
            },
            headers={"Content-Type": "application/x-www-form-urlencoded"},
            data={"state": state},
            allow_redirects=False,
        )

        # Make initial callback request
        auth_code = helper.get_param_from_url(
            url=response["headers"]["Location"], param="code"
        )

        # When
        debug = ApigeeApiTraceDebug(proxy=config.SERVICE_NAME)
        await debug.start_trace()
        await self.oauth.hit_oauth_endpoint(
            method="GET",
            endpoint="callback",
            params={"code": auth_code, "client_id": "some-client-id", "state": state},
            allow_redirects=False,
        )
        payload = await self._get_payload_from_splunk(debug)

        # Then
        auth = payload["auth"]
        auth_meta = auth["meta"]

        assert auth_meta["auth_type"] == "user"
        assert auth_meta["grant_type"] == "authorization_code"
        assert auth_meta["level"] == "p9"
        assert auth_meta["provider"] == "apim-mock-nhs-login"

        auth_user = auth["user"]
        assert auth_user["user_id"] == "9912003071"
```
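The helper fixture is not part of this row, so its get_param_from_url is an assumption; a sketch of what such a helper could look like:

```python
from urllib.parse import parse_qs, urlparse

def get_param_from_url(url: str, param: str) -> str:
    """Return the first value of `param` from a URL's query string."""
    return parse_qs(urlparse(url).query)[param][0]

# get_param_from_url("https://example.test/cb?code=abc&state=123", "state") -> "123"
```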
avg_line_length 33.530726, max_line_length 109, alphanum_fraction 0.578307
qsc_*_quality_signal values (schema order):
627 | 6,002 | 5.271132 | 0.172249 | 0.04357 | 0.050832 | 0.036006 | 0.865356 | 0.840242 | 0.840242 | 0.840242 | 0.840242 | 0.829349 | 0 | 0.012283 | 0.308231 | 6,002 | 178 | 110 | 33.719101 | 0.783719 | 0.044652 | 0 | 0.721429 | 0 | 0 | 0.160315 | 0.016434 | 0 | 0 | 0 | 0 | 0.107143 | 1 | 0 | false | 0 | 0.035714 | 0 | 0.05 | 0
qsc_* values (same order):
0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective 0, hits 7
Row 5: hexsha c796216a216b4c90d0407abfe5a8b5c2f70ac87e, size 5,386, ext py, lang Python
max_stars / max_issues / max_forks: repo_path test/test_image.py, repo_name meerk40t/svg.elements, head_hexsha 761bb315a6c12a8fcea990276570780a07fc492f, licenses ["MIT"]
max_stars_count 2 (2019-10-30T12:23:05.000Z to 2019-12-24T06:37:02.000Z); max_issues_count 12 (2019-10-30T18:50:56.000Z to 2019-12-21T00:50:02.000Z); max_forks_count null
content (the embedded base64 data URIs are kept verbatim):
```python
import unittest
from svgelements import *
class TestElementImage(unittest.TestCase):
def test_image_datauri(self):
e = Image(href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEACAYAAABccqhmAAAEFklEQVR4nO3dsW7bQBRFwSjw//8yU6UJYDgil9xdnZnalp6LewhBhV/HcRy/gKTfsw8A5hEACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACPuafUDd6/X68WeO43jgEooE4GH/M/iffkcQGEUAHnJm+D+9lhBwlQDcbOTwv3ttIeAsAbjJncP/7r2EgHf5FuAGT45/hfdlXwIw2OwRzn5/9iIAA60yvlXuYH0CMMhqo1vtHtYkAAOsOrZV72IdAnDR6iNb/T7mEoALdhnXLnfyPAGAMAE4aben6m738gwBgDABOGHXp+mud3MfAYAwAXjT7k/R3e9nLAGAMAGAMAGAMAF4w6d8fv6Uv4PrBADCBADCBADCBADCBADCBADCBADCBADCBOANn/Kfdz7l7+A6AYAwAYAwAYAwAXjT7p+fd7+fsQQAwgTghF2forvezX0EAMIE4KTdnqa73cszBADCBOCCXZ6qu9zJ8wTgotXHtfp9zCUAA6w6slXvYh0CMMhqY1vtHtYkAAOtMrpV7mB9AjDY7PHNfn/2IgA3mDVC4+ddX7MP+FR/x/jEf+ExfM4SgJvdGQLD5yoBeMjIEBg+owjAw/4d7/8EweC5iwBMZtzM5FsACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACBMACPsDhkRyDUVGTB8AAAAASUVORK5CYII=")
self.assertEqual(e.data[:6], b"\x89PNG\r\n")
e1 = Image(href="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAEAAQADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiq99f2emWcl5f3cFpax43zTyCNFyQBljwMkgfjXj/iH9ozQtPvBBoemT6vGPvzvIbZDwCNoKlj1IOQuMcZBzQB7RRXzB/w0d4w/6Buh/wDfib/47XV+Hv2kLG5uILfxBoslmhRVku7WXzVD5ALGMgFU+8eCxGAMN1oA90orP0bXNL8Q6cl/pF/Be2rYG+F87SQDtYdVbBGVOCM8itCgAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAqvf31vpmnXN/eSeXa2sTzTPtJ2ooJY4HJwAelWK8X/aM8Q3Gn+F9O0OBcR6pKzzvkcpEUITBHdmU5BGNmOQTQB5J8TPiZfeP9UCIJLbRbdybW0J5J6eZJjguR26KDgdSW4OiigAooooA6jwL461TwHrgv7A+bbyYW6tHbCXCDsfRhk4btnuCQfsPw5r9j4p8P2etaa0htLpCyeYu1lIJVlI9QwI4yOOCRzXwpXvH7N3iG4XUdW8NMu61eL7fGcgeW4KRt2ydwZO/GzpyaAPoeiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACvmD9o7/koen/8AYKj/APRstfT9eF/tIeHprnS9I8QW9tGyWjvb3UioS4V8GMsQPuBgw5PBkGPvGgD50ooooAKKKKACvQPgl/yV7Qv+3j/0nkrz+vbP2cvD01z4l1DxBLbRtaWlubeKSRCSJnIOUOMZCBg2DkCQdmoA+k6KKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKz9c0az8Q6He6RfpvtbuJonwASuejLkEBgcEHHBANaFFAHxR468C6p4D1w2F+PNt5Mta3aLhLhB3HowyMr2z3BBPL1916/4c0jxTpbabrVjHd2hcPsYlSrDoVZSCp6jII4JHQmvDPEP7N14t4G8NazA9q3WPUiVePgfxIpD5O7+FccDnrQB4PRXoH/AApL4h/9C9/5O2//AMcrrPD37OWtXNxBL4g1O0s7QoryRWpMs4ORmM5ARTjcNwLAEDhhQB5X4Y8Map4u1yHSNIg824k5Zm4SJB1dz2UZH5gAEkA/Y/gvwnZ+CvC9rotm/m+Vl5pygRppGOWYgfgBnJChRk4zR4T8F6F4K05rPRbTyvN2meZ2LyTMowCzH8TgYUEnAGTXQUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAH//Z")
self.assertEqual(e1.data[:3], b"\xff\xd8\xff")
e2 = Image(href="data:text/plain;base64,c3ZnZWxlbWVudHMgcmVhZHMgc3ZnIGZpbGVz")
self.assertEqual(e2.data, b"svgelements reads svg files")
e3 = Image(href="data:text/vnd-example+xyz;foo=bar;base64,R0lGODdh")
self.assertEqual(e3.data, b"GIF87a")
e4 = Image(href="data:text/plain;charset=UTF-8;page=21,the%20data:1234,5678")
self.assertEqual(e4.data, b"the data:1234,5678")
```
avg_line_length 269.3, max_line_length 3,214, alphanum_fraction 0.939101
qsc_*_quality_signal values (schema order):
184 | 5,386 | 27.478261 | 0.706522 | 0.0089 | 0.012856 | 0.010087 | 0.008703 | 0 | 0 | 0 | 0 | 0 | 0 | 0.07113 | 0.023765 | 5,386 | 19 | 3,215 | 283.473684 | 0.890453 | 0 | 0 | 0 | 0 | 0.214286 | 0.913835 | 0.900093 | 0 | 1 | 0 | 0 | 0.357143 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.285714 | 0
qsc_* values (same order):
0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective 0, hits 7
Row 6: hexsha c7d357e3000ff4f95d8d8969a664471980f6e333, size 13,224, ext py, lang Python
max_stars / max_issues / max_forks: repo_path advex/attacks.py, repo_name kai-wen-yang/CD-VAE, head_hexsha a33b5070d5d936396d51c8c2e7dedd62351ee5b2, licenses ["MIT"]
max_stars_count 23 (2021-12-10T02:09:49.000Z to 2022-03-24T11:46:58.000Z); max_issues_count 6 (2021-12-20T07:27:31.000Z to 2022-03-30T07:22:26.000Z); max_forks_count 3 (2021-12-20T13:38:50.000Z to 2022-02-20T20:58:45.000Z)
content:
```python
import torch
from torch import nn
from torch.nn import functional as F
from utils.normalize import *

CIFAR_MEAN = [0.4914, 0.4822, 0.4465]
CIFAR_STD = [0.2470, 0.2435, 0.2616]
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]

def Max_Incorrect_Label(logits, labels):
    max_2_logits, argmax_2_logits = torch.topk(logits, 2, dim=1)
    top_argmax, second_argmax = argmax_2_logits.chunk(2, dim=1)
    labels_eq_max = top_argmax.squeeze().eq(labels).float().view(-1, 1)
    labels_ne_max = top_argmax.squeeze().ne(labels).float().view(-1, 1)
    max_incorrect_label = labels_eq_max * second_argmax + labels_ne_max * top_argmax
    return max_incorrect_label

class MarginLoss(nn.Module):
    """
    Calculates the margin loss max(kappa, (max z_k (x) k != y) - z_y(x)),
    also known as the f6 loss used by the Carlini & Wagner attack.
    """

    def __init__(self, kappa=float('inf'), targeted=False):
        super().__init__()
        self.kappa = kappa
        self.targeted = targeted

    def forward(self, logits, labels):
        correct_logits = torch.gather(logits, 1, labels.view(-1, 1))
        max_2_logits, argmax_2_logits = torch.topk(logits, 2, dim=1)
        top_max, second_max = max_2_logits.chunk(2, dim=1)
        top_argmax, _ = argmax_2_logits.chunk(2, dim=1)
        labels_eq_max = top_argmax.squeeze().eq(labels).float().view(-1, 1)
        labels_ne_max = top_argmax.squeeze().ne(labels).float().view(-1, 1)
        max_incorrect_logits = labels_eq_max * second_max + labels_ne_max * top_max
        if self.targeted:
            return (correct_logits - max_incorrect_logits) \
                .clamp(max=self.kappa).squeeze()
        else:
            return (max_incorrect_logits - correct_logits) \
                .clamp(max=self.kappa).squeeze()

def get_cifar_params(resol):
    mean_list = []
    std_list = []
    for i in range(3):
        mean_list.append(torch.full((resol, resol), CIFAR_MEAN[i], device='cuda'))
        std_list.append(torch.full((resol, resol), CIFAR_STD[i], device='cuda'))
    return torch.unsqueeze(torch.stack(mean_list), 0), torch.unsqueeze(torch.stack(std_list), 0)

def get_imagenet_params(resol):
    mean_list = []
    std_list = []
    for i in range(3):
        mean_list.append(torch.full((resol, resol), IMAGENET_MEAN[i], device='cuda'))
        std_list.append(torch.full((resol, resol), IMAGENET_STD[i], device='cuda'))
    return torch.unsqueeze(torch.stack(mean_list), 0), torch.unsqueeze(torch.stack(std_list), 0)

class CIFARNORMALIZE(nn.Module):
    def __init__(self, resol):
        super().__init__()
        self.mean, self.std = get_cifar_params(resol)

    def forward(self, x):
        '''
        Parameters:
            x: input image with pixels normalized to ([0, 1] - IMAGENET_MEAN) / IMAGENET_STD
        '''
        x = x.sub(self.mean)
        x = x.div(self.std)
        return x

class CIFARINNORMALIZE(nn.Module):
    def __init__(self, resol):
        super().__init__()
        self.mean, self.std = get_cifar_params(resol)

    def forward(self, x):
        '''
        Parameters:
            x: input image with pixels normalized to ([0, 1] - IMAGENET_MEAN) / IMAGENET_STD
        '''
        x = x.mul(self.std)
        x = x.add(*self.mean)
        return x

class IMAGENETNORMALIZE(nn.Module):
    def __init__(self, resol):
        super().__init__()
        self.mean, self.std = get_imagenet_params(resol)

    def forward(self, x):
        '''
        Parameters:
            x: input image with pixels normalized to ([0, 1] - IMAGENET_MEAN) / IMAGENET_STD
        '''
        x = x.sub(self.mean)
        x = x.div(self.std)
        return x

class IMAGENETINNORMALIZE(nn.Module):
    def __init__(self, resol):
        super().__init__()
        self.mean, self.std = get_imagenet_params(resol)

    def forward(self, x):
        '''
        Parameters:
            x: input image with pixels normalized to ([0, 1] - IMAGENET_MEAN) / IMAGENET_STD
        '''
        x = x.mul(self.std)
        x = x.add(*self.mean)
        return x

class NoAttack(nn.Module):
    """
    Attack that does nothing.
    """

    def __init__(self, model=None):
        super().__init__()
        self.model = model

    def forward(self, inputs, labels):
        return inputs

class PGDAttack(nn.Module):
    def __init__(self, model, eps_max=8/255, step_size=None, num_iterations=7, norm='linf', rand_init=True, scale_each=False, loss='ce'):
        super().__init__()
        self.nb_its = num_iterations
        self.eps_max = eps_max
        if step_size is None:
            step_size = eps_max / (self.nb_its ** 0.5)
        self.step_size = step_size
        self.norm = norm
        self.rand_init = rand_init
        self.scale_each = scale_each
        self.loss = loss
        if self.loss == 'margin':
            self.criterion = MarginLoss(kappa=10)
        else:
            self.criterion = nn.CrossEntropyLoss().cuda()
        self.model = model
        self.normalize = CIFARNORMALIZE(32)
        self.innormalize = CIFARINNORMALIZE(32)

    def _init(self, shape, eps):
        if self.rand_init:
            if self.norm == 'linf':
                init = torch.rand(shape, dtype=torch.float32, device='cuda') * 2 - 1
            elif self.norm == 'l2':
                init = torch.randn(shape, dtype=torch.float32, device='cuda')
                init_norm = torch.norm(init.view(init.size()[0], -1), 2.0, dim=1)
                normalized_init = init / init_norm[:, None, None, None]
                dim = init.size()[1] * init.size()[2] * init.size()[3]
                rand_norms = torch.pow(torch.rand(init.size()[0], dtype=torch.float32, device='cuda'), 1/dim)
                init = normalized_init * rand_norms[:, None, None, None]
            else:
                raise NotImplementedError
            init = eps[:, None, None, None] * init
            init.requires_grad_()
            return init
        else:
            return torch.zeros(shape, requires_grad=True, device='cuda')

    def forward(self, img, labels, return_adv_logits=False):
        base_eps = self.eps_max * torch.ones(img.size()[0], device='cuda')
        step_size = self.step_size * torch.ones(img.size()[0], device='cuda')

        img = img.detach()
        img.requires_grad = True

        delta = self._init(img.size(), base_eps)
        out = self.model(self.normalize(img+delta))
        if self.norm == 'l2':
            l2_max = base_eps

        for it in range(self.nb_its):
            loss = self.criterion(out, labels)
            if self.loss == 'margin':
                loss.sum().backward()
            else:
                loss.backward()
            '''
            Because of batching, this grad is scaled down by 1 / batch_size, which does not matter
            for what follows because of normalization.
            '''
            grad = delta.grad.data
            if self.norm == 'linf':
                grad_sign = grad.sign()
                delta.data = delta.data + step_size[:, None, None, None] * grad_sign
                delta.data = torch.max(torch.min(delta.data, base_eps[:, None, None, None]), -base_eps[:, None, None, None])
                delta.data = torch.clamp(img.data + delta.data, 0., 1.) - img.data
            elif self.norm == 'l2':
                batch_size = delta.data.size()[0]
                grad_norm = torch.norm(grad.view(batch_size, -1), 2.0, dim=1)
                normalized_grad = grad / grad_norm[:, None, None, None]
                delta.data = delta.data + step_size[:, None, None, None] * normalized_grad
                l2_delta = torch.norm(delta.data.view(batch_size, -1), 2.0, dim=1)
                # Check for numerical instability
                proj_scale = torch.min(torch.ones_like(l2_delta, device='cuda'), l2_max / l2_delta)
                delta.data *= proj_scale[:, None, None, None]
                delta.data = torch.clamp(img.data + delta.data, 0., 1.) - img.data
            else:
                raise NotImplementedError
            if it != self.nb_its - 1:
                out = self.model(self.normalize(img + delta))
                delta.grad.data.zero_()

        delta.data[torch.isnan(delta.data)] = 0
        adv_sample = img + delta
        max_incorrect_label = Max_Incorrect_Label(out, labels)
        if return_adv_logits:
            return torch.clamp(adv_sample.detach(), 0, 1), max_incorrect_label.squeeze(1).long()
        else:
            return torch.clamp(adv_sample.detach(), 0, 1)

class AttackV2(nn.Module):
    def __init__(self, model, vae, eps_max=8/255, step_size=None, num_iterations=7, norm='linf', rand_init=True, scale_each=False, loss='ce'):
        super().__init__()
        self.nb_its = num_iterations
        self.eps_max = eps_max
        if step_size is None:
            step_size = eps_max / (self.nb_its ** 0.5)
        self.step_size = step_size
        self.norm = norm
        self.rand_init = rand_init
        self.scale_each = scale_each
        self.loss = loss
        if self.loss == 'margin':
            self.criterion = MarginLoss(kappa=10)
        else:
            self.criterion = nn.CrossEntropyLoss().cuda()
        self.model = model
        self.vae = vae
        self.normalize = CIFARNORMALIZE(32)
        self.innormalize = CIFARINNORMALIZE(32)

    def _init(self, shape, eps):
        if self.rand_init:
            if self.norm == 'linf':
                init = torch.rand(shape, dtype=torch.float32, device='cuda') * 2 - 1
            elif self.norm == 'l2':
                init = torch.randn(shape, dtype=torch.float32, device='cuda')
                init_norm = torch.norm(init.view(init.size()[0], -1), 2.0, dim=1)
                normalized_init = init / init_norm[:, None, None, None]
                dim = init.size()[1] * init.size()[2] * init.size()[3]
                rand_norms = torch.pow(torch.rand(init.size()[0], dtype=torch.float32, device='cuda'), 1/dim)
                init = normalized_init * rand_norms[:, None, None, None]
            else:
                raise NotImplementedError
            init = eps[:, None, None, None] * init
            init.requires_grad_()
            return init
        else:
            return torch.zeros(shape, requires_grad=True, device='cuda')

    def forward(self, img, labels, return_adv_logits=False):
        base_eps = self.eps_max * torch.ones(img.size()[0], device='cuda')
        step_size = self.step_size * torch.ones(img.size()[0], device='cuda')

        img = img.detach()
        img.requires_grad = True

        delta = self._init(img.size(), base_eps)
        gx, _, _ = self.vae(self.normalize(img+delta))
        out_g = self.model(gx)
        if self.norm == 'l2':
            l2_max = base_eps

        for it in range(self.nb_its):
            loss = self.criterion(out_g, labels)
            if self.loss == 'margin':
                loss.sum().backward()
            else:
                loss.backward()
            '''
            Because of batching, this grad is scaled down by 1 / batch_size, which does not matter
            for what follows because of normalization.
            '''
            grad = delta.grad.data
            if self.norm == 'linf':
                grad_sign = grad.sign()
                delta.data = delta.data + step_size[:, None, None, None] * grad_sign
                delta.data = torch.max(torch.min(delta.data, base_eps[:, None, None, None]), -base_eps[:, None, None, None])
                delta.data = torch.clamp(img.data + delta.data, 0., 1.) - img.data
            elif self.norm == 'l2':
                batch_size = delta.data.size()[0]
                grad_norm = torch.norm(grad.view(batch_size, -1), 2.0, dim=1)
                normalized_grad = grad / grad_norm[:, None, None, None]
                delta.data = delta.data + step_size[:, None, None, None] * normalized_grad
                l2_delta = torch.norm(delta.data.view(batch_size, -1), 2.0, dim=1)
                # Check for numerical instability
                proj_scale = torch.min(torch.ones_like(l2_delta, device='cuda'), l2_max / l2_delta)
                delta.data *= proj_scale[:, None, None, None]
                delta.data = torch.clamp(img.data + delta.data, 0., 1.) - img.data
            else:
                raise NotImplementedError
            if it != self.nb_its - 1:
                gx, _, _ = self.vae(self.normalize(img+delta))
                out_g = self.model(gx)
                delta.grad.data.zero_()

        delta.data[torch.isnan(delta.data)] = 0
        adv_sample = img + delta
        max_incorrect_label = Max_Incorrect_Label(out_g, labels)
        if return_adv_logits:
            return torch.clamp(adv_sample.detach(), 0, 1), max_incorrect_label.squeeze(1).long()
        else:
            return torch.clamp(adv_sample.detach(), 0, 1)
```
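A hypothetical harness for the PGDAttack module above. CUDA is assumed, since the attack allocates its working tensors with device='cuda'; the tiny linear model and random batch are stand-ins for a real classifier and data loader:

```python
import torch
from torch import nn

model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10)).cuda()
attack = PGDAttack(model, eps_max=8 / 255, num_iterations=7, norm='linf')

img = torch.rand(4, 3, 32, 32, device='cuda')     # pixels in [0, 1]
labels = torch.randint(0, 10, (4,), device='cuda')

adv = attack(img, labels)                         # adversarial batch, still in [0, 1]
# The perturbation stays inside the eps ball enforced by the clamp/min/max steps.
assert (adv - img).abs().max().item() <= 8 / 255 + 1e-6
```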
avg_line_length 39.592814, max_line_length 145, alphanum_fraction 0.56216
qsc_*_quality_signal values (schema order):
1,689 | 13,224 | 4.208999 | 0.113677 | 0.040512 | 0.030384 | 0.01266 | 0.871009 | 0.866929 | 0.849346 | 0.836405 | 0.836405 | 0.836405 | 0 | 0.024393 | 0.311782 | 13,224 | 333 | 146 | 39.711712 | 0.75673 | 0.046204 | 0 | 0.809717 | 0 | 0 | 0.011807 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08502 | false | 0 | 0.016194 | 0.004049 | 0.206478 | 0
qsc_* values (same order):
0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective 0, hits 7
Row 7: hexsha 4015ff78a3691870075fac8f0ebe2c6611a18f3f, size 39, ext py, lang Python
max_stars: repo_path src/lib/statvfs.py, repo_name DTenore/skulpt, head_hexsha 098d20acfb088d6db85535132c324b7ac2f2d212, licenses ["MIT"], count 2,671 (2015-01-03T08:23:25.000Z to 2022-03-31T06:15:48.000Z)
max_issues: repo_path src/lib/statvfs.py, repo_name wakeupmuyunhe/skulpt, head_hexsha a8fb11a80fb6d7c016bab5dfe3712517a350b347, licenses ["MIT"], count 972 (2015-01-05T08:11:00.000Z to 2022-03-29T13:47:15.000Z)
max_forks: repo_path src/lib/statvfs.py, repo_name wakeupmuyunhe/skulpt, head_hexsha a8fb11a80fb6d7c016bab5dfe3712517a350b347, licenses ["MIT"], count 845 (2015-01-03T19:53:36.000Z to 2022-03-29T18:34:22.000Z)
content:
```python
import _sk_fail; _sk_fail._("statvfs")
```
avg_line_length 19.5, max_line_length 38, alphanum_fraction 0.769231
qsc_*_quality_signal values (schema order):
6 | 39 | 4.166667 | 0.666667 | 0.48 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076923 | 39 | 1 | 39 | 39 | 0.694444 | 0 | 0 | 0 | 0 | 0 | 0.179487 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
qsc_* values (same order):
1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
effective 0, hits 7
Row 8: hexsha 4023d4faa6b1a4978633073911ce2cc93929d594, size 4,541, ext py, lang Python
max_stars / max_issues / max_forks: repo_path venv/Lib/site-packages/tensorflow_core/_api/v2/image/__init__.py, repo_name TEDxVienna/continuum, head_hexsha 85cefbc274fc59e2059c313bc0d3b9b93a34ba6d, licenses ["MIT"]; all counts and event datetimes null
content:
```python
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Image processing and decoding ops.

See the [Images](https://tensorflow.org/api_guides/python/image) guide.
"""

from __future__ import print_function as _print_function

import sys as _sys

from tensorflow.python.ops.array_ops import extract_image_patches_v2 as extract_patches
from tensorflow.python.ops.gen_image_ops import decode_and_crop_jpeg
from tensorflow.python.ops.gen_image_ops import decode_bmp
from tensorflow.python.ops.gen_image_ops import decode_gif
from tensorflow.python.ops.gen_image_ops import decode_jpeg
from tensorflow.python.ops.gen_image_ops import decode_png
from tensorflow.python.ops.gen_image_ops import encode_jpeg
from tensorflow.python.ops.gen_image_ops import encode_png
from tensorflow.python.ops.gen_image_ops import extract_jpeg_shape
from tensorflow.python.ops.gen_image_ops import hsv_to_rgb
from tensorflow.python.ops.gen_image_ops import rgb_to_hsv
from tensorflow.python.ops.image_ops_impl import ResizeMethod
from tensorflow.python.ops.image_ops_impl import adjust_brightness
from tensorflow.python.ops.image_ops_impl import adjust_contrast
from tensorflow.python.ops.image_ops_impl import adjust_gamma
from tensorflow.python.ops.image_ops_impl import adjust_hue
from tensorflow.python.ops.image_ops_impl import adjust_jpeg_quality
from tensorflow.python.ops.image_ops_impl import adjust_saturation
from tensorflow.python.ops.image_ops_impl import central_crop
from tensorflow.python.ops.image_ops_impl import combined_non_max_suppression
from tensorflow.python.ops.image_ops_impl import convert_image_dtype
from tensorflow.python.ops.image_ops_impl import crop_and_resize_v2 as crop_and_resize
from tensorflow.python.ops.image_ops_impl import crop_to_bounding_box
from tensorflow.python.ops.image_ops_impl import decode_image
from tensorflow.python.ops.image_ops_impl import draw_bounding_boxes_v2 as draw_bounding_boxes
from tensorflow.python.ops.image_ops_impl import extract_glimpse_v2 as extract_glimpse
from tensorflow.python.ops.image_ops_impl import flip_left_right
from tensorflow.python.ops.image_ops_impl import flip_up_down
from tensorflow.python.ops.image_ops_impl import grayscale_to_rgb
from tensorflow.python.ops.image_ops_impl import image_gradients
from tensorflow.python.ops.image_ops_impl import is_jpeg
from tensorflow.python.ops.image_ops_impl import non_max_suppression
from tensorflow.python.ops.image_ops_impl import non_max_suppression_padded
from tensorflow.python.ops.image_ops_impl import non_max_suppression_with_overlaps as non_max_suppression_overlaps
from tensorflow.python.ops.image_ops_impl import non_max_suppression_with_scores
from tensorflow.python.ops.image_ops_impl import pad_to_bounding_box
from tensorflow.python.ops.image_ops_impl import per_image_standardization
from tensorflow.python.ops.image_ops_impl import psnr
from tensorflow.python.ops.image_ops_impl import random_brightness
from tensorflow.python.ops.image_ops_impl import random_contrast
from tensorflow.python.ops.image_ops_impl import random_flip_left_right
from tensorflow.python.ops.image_ops_impl import random_flip_up_down
from tensorflow.python.ops.image_ops_impl import random_hue
from tensorflow.python.ops.image_ops_impl import random_jpeg_quality
from tensorflow.python.ops.image_ops_impl import random_saturation
from tensorflow.python.ops.image_ops_impl import resize_image_with_crop_or_pad as resize_with_crop_or_pad
from tensorflow.python.ops.image_ops_impl import resize_image_with_pad_v2 as resize_with_pad
from tensorflow.python.ops.image_ops_impl import resize_images_v2 as resize
from tensorflow.python.ops.image_ops_impl import rgb_to_grayscale
from tensorflow.python.ops.image_ops_impl import rgb_to_yiq
from tensorflow.python.ops.image_ops_impl import rgb_to_yuv
from tensorflow.python.ops.image_ops_impl import rot90
from tensorflow.python.ops.image_ops_impl import sample_distorted_bounding_box_v2 as sample_distorted_bounding_box
from tensorflow.python.ops.image_ops_impl import sobel_edges
from tensorflow.python.ops.image_ops_impl import ssim
from tensorflow.python.ops.image_ops_impl import ssim_multiscale
from tensorflow.python.ops.image_ops_impl import total_variation
from tensorflow.python.ops.image_ops_impl import transpose
from tensorflow.python.ops.image_ops_impl import yiq_to_rgb
from tensorflow.python.ops.image_ops_impl import yuv_to_rgb
from tensorflow.python.ops.random_ops import random_crop

del _print_function
```
| 59.75
| 114
| 0.885928
| 744
| 4,541
| 5.056452
| 0.139785
| 0.26369
| 0.324296
| 0.37294
| 0.781233
| 0.78017
| 0.771398
| 0.771398
| 0.619351
| 0.350877
| 0
| 0.002124
| 0.066946
| 4,541
| 75
| 115
| 60.546667
| 0.885768
| 0.051531
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.984375
| 0
| 0.984375
| 0.03125
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 10
|
40856660a67f2e60a0ce5f6006729c59a27631f6
| 23,723
|
py
|
Python
|
tests/flow/tests_pytorch.py
|
matipan/RedisAI
|
84a1dac3893eb1d33e2bdcf3f71398f521ca461c
|
[
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 652
|
2019-02-09T08:22:31.000Z
|
2022-03-31T18:29:46.000Z
|
tests/flow/tests_pytorch.py
|
matipan/RedisAI
|
84a1dac3893eb1d33e2bdcf3f71398f521ca461c
|
[
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 704
|
2019-01-15T19:59:24.000Z
|
2022-03-27T12:17:39.000Z
|
tests/flow/tests_pytorch.py
|
matipan/RedisAI
|
84a1dac3893eb1d33e2bdcf3f71398f521ca461c
|
[
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | 84
|
2019-02-02T19:13:23.000Z
|
2022-03-29T06:43:30.000Z
|
import redis
import sys
import threading
import time
from includes import *
from RLTest import Env
'''
python -m RLTest --test tests_pytorch.py --module path/to/redisai.so
'''
def test_pytorch_chunked_modelstore(env):
if not TEST_PT:
env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
model = load_file_content('pt-minimal.pt')
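# Split the serialized model into three roughly equal chunks to exercise chunked AI.MODELSTORE below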
chunk_size = len(model) // 3
model_chunks = [model[i:i + chunk_size] for i in range(0, len(model), chunk_size)]
ret = con.execute_command('AI.MODELSTORE', 'm1{1}', 'TORCH', DEVICE, 'BLOB', model)
ret = con.execute_command('AI.MODELSTORE', 'm2{1}', 'TORCH', DEVICE, 'BLOB', *model_chunks)
model1 = con.execute_command('AI.MODELGET', 'm1{1}', 'BLOB')
model2 = con.execute_command('AI.MODELGET', 'm2{1}', 'BLOB')
env.assertEqual(model1, model2)
ret = con.execute_command('AI.CONFIG', 'MODEL_CHUNK_SIZE', chunk_size)
model2 = con.execute_command('AI.MODELGET', 'm2{1}', 'BLOB')
env.assertEqual(len(model2), len(model_chunks))
env.assertTrue(all([el1 == el2 for el1, el2 in zip(model2, model_chunks)]))
model3 = con.execute_command('AI.MODELGET', 'm2{1}', 'META', 'BLOB')[-1] # Extract the BLOB list from the result
env.assertEqual(len(model3), len(model_chunks))
env.assertTrue(all([el1 == el2 for el1, el2 in zip(model3, model_chunks)]))
def test_pytorch_modelrun(env):
if not TEST_PT:
env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
model_pb = load_file_content('pt-minimal.pt')
wrong_model_pb = load_file_content('graph.pb')
ret = con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.TENSORSET', 'b{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'TORCH', DEVICE, 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
ret = con.execute_command('AI.MODELGET', 'm{1}', 'META')
env.assertEqual(len(ret), 16)
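# The META reply is a flat field/value list (8 pairs); indices 1, 3 and 5 hold backend, device and tag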
# TODO: enable me. CI is having issues on GPU asserts of TORCH and CPU
if DEVICE == "CPU":
env.assertEqual(ret[1], b'TORCH')
env.assertEqual(ret[3], b'CPU')
env.assertEqual(ret[5], b'')
env.assertEqual(ret[7], 0)
env.assertEqual(ret[9], 0)
env.assertEqual(ret[15], 0)
# assert there are no inputs or outputs
env.assertEqual(len(ret[11]), 2)
env.assertEqual(len(ret[13]), 1)
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'TORCH', DEVICE, 'TAG', 'my:tag:v3', 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
ret = con.execute_command('AI.MODELGET', 'm{1}', 'META')
env.assertEqual(len(ret), 16)
env.assertEqual(ret[5], b'my:tag:v3')
# TODO: enable me. CI is having issues on GPU asserts of TORCH and CPU
if DEVICE == "CPU":
env.assertEqual(ret[1], b'TORCH')
env.assertEqual(ret[3], b'CPU')
check_error(env, con, 'AI.MODELSTORE', 'm{1}', 'TORCH', DEVICE, 'BLOB', wrong_model_pb)
con.execute_command('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 2, 'a{1}', 'b{1}', 'OUTPUTS', 1, 'c{1}')
ensureSlaveSynced(con, env)
values = con.execute_command('AI.TENSORGET', 'c{1}', 'VALUES')
env.assertEqual(values, [b'4', b'6', b'4', b'6'])
if env.useSlaves:
con2 = env.getSlaveConnection()
values2 = con2.execute_command('AI.TENSORGET', 'c{1}', 'VALUES')
env.assertEqual(values2, values)
def test_pytorch_modelrun_autobatch(env):
if not TEST_PT:
return
con = get_connection(env, '{1}')
model_pb = load_file_content('pt-minimal.pt')
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'TORCH', 'CPU',
'BATCHSIZE', 4, 'MINBATCHSIZE', 2, 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
con.execute_command('AI.TENSORSET', 'b{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
con.execute_command('AI.TENSORSET', 'd{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
con.execute_command('AI.TENSORSET', 'e{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
ensureSlaveSynced(con, env)
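# Issue two model executions concurrently so RedisAI can auto-batch them (two batch-2 inputs satisfy MINBATCHSIZE 2)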
def run():
con = get_connection(env, '{1}')
con.execute_command('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 2, 'd{1}', 'e{1}', 'OUTPUTS', 1, 'f{1}')
ensureSlaveSynced(con, env)
t = threading.Thread(target=run)
t.start()
con.execute_command('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 2, 'a{1}', 'b{1}', 'OUTPUTS', 1, 'c{1}')
t.join()
ensureSlaveSynced(con, env)
values = con.execute_command('AI.TENSORGET', 'c{1}', 'VALUES')
env.assertEqual(values, [b'4', b'6', b'4', b'6'])
values = con.execute_command('AI.TENSORGET', 'f{1}', 'VALUES')
env.assertEqual(values, [b'4', b'6', b'4', b'6'])
def test_pytorch_modelrun_autobatch_badbatch(env):
if not TEST_PT:
return
con = get_connection(env, '{1}')
model_pb = load_file_content('pt-minimal-bb.pt')
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'TORCH', 'CPU',
'BATCHSIZE', 4, 'MINBATCHSIZE', 3, 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
con.execute_command('AI.TENSORSET', 'b{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
con.execute_command('AI.TENSORSET', 'd{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
con.execute_command('AI.TENSORSET', 'e{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
ensureSlaveSynced(con, env)
def run():
con = get_connection(env, '{1}')
check_error_message(env, con, "Model did not generate the expected batch size",
'AI.MODELEXECUTE', 'm{1}', 'INPUTS', 2, 'd{1}', 'e{1}', 'OUTPUTS', 2, 'f1{1}', 'f2{1}')
t = threading.Thread(target=run)
t.start()
check_error_message(env, con, "Model did not generate the expected batch size",
'AI.MODELEXECUTE', 'm{1}', 'INPUTS', 2, 'a{1}', 'b{1}', 'OUTPUTS', 2, 'c1{1}', 'c2{1}')
t.join()
def test_pytorch_modelinfo(env):
if not TEST_PT:
env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
model_pb = load_file_content('pt-minimal.pt')
model_key = 'm{1}'
tensor_a_key = 'a{1}'
tensor_b_key = 'b{1}'
tensor_c_key = 'c{1}'
ret = con.execute_command('AI.MODELSTORE', model_key, 'TORCH', DEVICE, 'TAG', 'asdf', 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.TENSORSET', tensor_a_key, 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.TENSORSET', tensor_b_key, 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
previous_duration = 0
for call in range(1, 100):
ret = con.execute_command('AI.MODELEXECUTE', model_key, 'INPUTS', 2, tensor_a_key, tensor_b_key, 'OUTPUTS', 1, tensor_c_key)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
info = con.execute_command('AI.INFO', model_key)
info_dict_0 = info_to_dict(info)
env.assertEqual(info_dict_0['key'], model_key)
env.assertEqual(info_dict_0['type'], 'MODEL')
env.assertEqual(info_dict_0['backend'], 'TORCH')
env.assertEqual(info_dict_0['device'], DEVICE)
env.assertEqual(info_dict_0['tag'], 'asdf')
env.assertTrue(info_dict_0['duration'] > previous_duration)
env.assertEqual(info_dict_0['samples'], 2 * call)
env.assertEqual(info_dict_0['calls'], call)
env.assertEqual(info_dict_0['errors'], 0)
previous_duration = info_dict_0['duration']
res = con.execute_command('AI.INFO', model_key, 'RESETSTAT')
env.assertEqual(res, b'OK')
info = con.execute_command('AI.INFO', model_key)
info_dict_0 = info_to_dict(info)
env.assertEqual(info_dict_0['duration'], 0)
env.assertEqual(info_dict_0['samples'], 0)
env.assertEqual(info_dict_0['calls'], 0)
env.assertEqual(info_dict_0['errors'], 0)
def test_pytorch_scriptget(env):
if not TEST_PT:
env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
con.execute_command('DEL', 'EMPTY{1}')
# ERR no script at key from SCRIPTGET
check_error_message(env, con, "script key is empty", 'AI.SCRIPTGET', 'EMPTY{1}')
con.execute_command('SET', 'NOT_SCRIPT{1}', 'BAR')
# ERR wrong type from SCRIPTGET
check_error_message(env, con, "WRONGTYPE Operation against a key holding the wrong kind of value", 'AI.SCRIPTGET', 'NOT_SCRIPT{1}')
script = load_file_content('script.txt')
ret = con.execute_command('AI.SCRIPTSTORE', 'my_script{1}', DEVICE, 'TAG', 'my_tag',
'ENTRY_POINTS', 2, 'bar', 'bar_variadic', 'SOURCE', script)
env.assertEqual(ret, b'OK')
# return meta + source
_, device, _, tag, _, entry_points, _, source = con.execute_command('AI.SCRIPTGET', 'my_script{1}')
env.assertEqual([device, tag, entry_points, source], [bytes(DEVICE, "utf8"), b"my_tag", [b'bar', b'bar_variadic'], script])
# return source only
source = con.execute_command('AI.SCRIPTGET', 'my_script{1}', 'SOURCE')
env.assertEqual(source, script)
def test_pytorch_scriptdel(env):
if not TEST_PT:
env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
script = load_file_content('script.txt')
ret = con.execute_command('AI.SCRIPTSTORE', 'ket{1}', DEVICE, 'ENTRY_POINTS', 2, 'bar', 'bar_variadic', 'SOURCE', script)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
ret = con.execute_command('AI.SCRIPTDEL', 'ket{1}')
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
env.assertFalse(con.execute_command('EXISTS', 'ket{1}'))
if env.useSlaves:
con2 = env.getSlaveConnection()
env.assertFalse(con2.execute_command('EXISTS', 'ket{1}'))
con.execute_command('DEL', 'EMPTY{1}')
# ERR no script at key from SCRIPTDEL
check_error_message(env, con, "script key is empty", 'AI.SCRIPTDEL', 'EMPTY{1}')
con.execute_command('SET', 'NOT_SCRIPT{1}', 'BAR')
# ERR wrong type from SCRIPTDEL
check_error_message(env, con, "WRONGTYPE Operation against a key holding the wrong kind of value", 'AI.SCRIPTDEL', 'NOT_SCRIPT{1}')
def test_pytorch_scriptexecute(env):
if not TEST_PT:
env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
script = load_file_content('script.txt')
ret = con.execute_command('AI.SCRIPTSTORE', 'myscript{1}', DEVICE, 'TAG', 'version1', 'ENTRY_POINTS', 2, 'bar', 'bar_variadic', 'SOURCE', script)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.TENSORSET', 'b{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
for _ in range(0, 100):
ret = con.execute_command('AI.SCRIPTEXECUTE', 'myscript{1}', 'bar', 'KEYS', 1, '{1}', 'INPUTS', 2, 'a{1}', 'b{1}', 'OUTPUTS', 1, 'c{1}')
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
info = con.execute_command('AI.INFO', 'myscript{1}')
info_dict_0 = info_to_dict(info)
env.assertEqual(info_dict_0['key'], 'myscript{1}')
env.assertEqual(info_dict_0['type'], 'SCRIPT')
env.assertEqual(info_dict_0['backend'], 'TORCH')
env.assertEqual(info_dict_0['tag'], 'version1')
env.assertTrue(info_dict_0['duration'] > 0)
env.assertEqual(info_dict_0['samples'], -1)
env.assertEqual(info_dict_0['calls'], 100)
env.assertEqual(info_dict_0['errors'], 0)
values = con.execute_command('AI.TENSORGET', 'c{1}', 'VALUES')
env.assertEqual(values, [b'4', b'6', b'4', b'6'])
ensureSlaveSynced(con, env)
if env.useSlaves:
con2 = env.getSlaveConnection()
values2 = con2.execute_command('AI.TENSORGET', 'c{1}', 'VALUES')
env.assertEqual(values2, values)
def test_pytorch_scriptexecute_list_input(env):
if not TEST_PT:
env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
script = load_file_content('script.txt')
ret = con.execute_command('AI.SCRIPTSTORE', 'myscript{$}', DEVICE, 'TAG', 'version1', 'ENTRY_POINTS', 2, 'bar', 'bar_variadic', 'SOURCE', script)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.TENSORSET', 'a{$}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.TENSORSET', 'b1{$}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.TENSORSET', 'b2{$}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
for _ in range(0, 100):
ret = con.execute_command('AI.SCRIPTEXECUTE', 'myscript{$}', 'bar_variadic', 'KEYS', 1, '{$}', 'INPUTS', 3, 'a{$}', 'b1{$}', 'b2{$}', 'OUTPUTS', 1, 'c{$}')
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
info = con.execute_command('AI.INFO', 'myscript{$}')
info_dict_0 = info_to_dict(info)
env.assertEqual(info_dict_0['key'], 'myscript{$}')
env.assertEqual(info_dict_0['type'], 'SCRIPT')
env.assertEqual(info_dict_0['backend'], 'TORCH')
env.assertEqual(info_dict_0['tag'], 'version1')
env.assertTrue(info_dict_0['duration'] > 0)
env.assertEqual(info_dict_0['samples'], -1)
env.assertEqual(info_dict_0['calls'], 100)
env.assertEqual(info_dict_0['errors'], 0)
values = con.execute_command('AI.TENSORGET', 'c{$}', 'VALUES')
env.assertEqual(values, [b'4', b'6', b'4', b'6'])
ensureSlaveSynced(con, env)
if env.useSlaves:
con2 = env.getSlaveConnection()
values2 = con2.execute_command('AI.TENSORGET', 'c{$}', 'VALUES')
env.assertEqual(values2, values)
def test_pytorch_scriptexecute_with_timeout(env):
if not TEST_PT:
env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{$}')
script = load_file_content('script.txt')
ret = con.execute_command('AI.SCRIPTSTORE', 'my_script{$}', DEVICE,
'ENTRY_POINTS', 2, 'bar', 'long_func', 'SOURCE', script)
env.assertEqual(ret, b'OK')
con.execute_command('AI.TENSORSET', 'a{$}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
con.execute_command('AI.TENSORSET', 'b{$}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
def run():
con2 = get_connection(env, '{$}')
con2.execute_command('AI.SCRIPTEXECUTE', 'my_script{$}', 'long_func', 'KEYS', 1, '{$}')
t = threading.Thread(target=run)
t.start()
# make sure that a long-running operation is already executing in RedisAI when the following
# command is sent, to ensure that the timeout occurs.
time.sleep(0.5)
ret = con.execute_command('AI.SCRIPTEXECUTE', 'my_script{$}', 'bar',
'INPUTS', 2, 'a{$}', 'b{$}', 'OUTPUTS', 1, 'c{$}', 'TIMEOUT', 1)
env.assertEqual(ret, b'TIMEDOUT')
t.join()
def test_pytorch_scriptinfo(env):
if not TEST_PT:
env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
script = load_file_content('script.txt')
ret = con.execute_command('AI.SCRIPTSTORE', 'ket_script{1}', DEVICE, 'ENTRY_POINTS', 2, 'bar', 'bar_variadic', 'SOURCE', script)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.TENSORSET', 'b{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
previous_duration = 0
for call in range(1, 100):
ret = con.execute_command('AI.SCRIPTEXECUTE', 'ket_script{1}', 'bar', 'KEYS', 1, '{1}', 'INPUTS', 2, 'a{1}', 'b{1}', 'OUTPUTS', 1, 'c{1}')
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
info = con.execute_command('AI.INFO', 'ket_script{1}')
info_dict_0 = info_to_dict(info)
env.assertEqual(info_dict_0['key'], 'ket_script{1}')
env.assertEqual(info_dict_0['type'], 'SCRIPT')
env.assertEqual(info_dict_0['backend'], 'TORCH')
env.assertEqual(info_dict_0['device'], DEVICE)
env.assertTrue(info_dict_0['duration'] > previous_duration)
env.assertEqual(info_dict_0['samples'], -1)
env.assertEqual(info_dict_0['calls'], call)
env.assertEqual(info_dict_0['errors'], 0)
previous_duration = info_dict_0['duration']
res = con.execute_command('AI.INFO', 'ket_script{1}', 'RESETSTAT')
env.assertEqual(res, b'OK')
info = con.execute_command('AI.INFO', 'ket_script{1}')
info_dict_0 = info_to_dict(info)
env.assertEqual(info_dict_0['duration'], 0)
env.assertEqual(info_dict_0['samples'], -1)
env.assertEqual(info_dict_0['calls'], 0)
env.assertEqual(info_dict_0['errors'], 0)
def test_pytorch_scriptexecute_disconnect(env):
if not TEST_PT:
env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True)
return
if DEVICE == "GPU":
env.debugPrint("skipping {} since it's hanging CI".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
script = load_file_content('script.txt')
ret = con.execute_command('AI.SCRIPTSTORE', 'ket_script{1}', DEVICE, 'ENTRY_POINTS', 2, 'bar', 'bar_variadic', 'SOURCE', script)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.TENSORSET', 'b{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
ret = send_and_disconnect(('AI.SCRIPTEXECUTE', 'ket_script{1}', 'bar', 'KEYS', 1, '{1}', 'INPUTS', 2, 'a{1}', 'b{1}', 'OUTPUTS', 1, 'c{1}'), con)
env.assertEqual(ret, None)
def test_pytorch_modelrun_disconnect(env):
if not TEST_PT:
env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True)
return
if DEVICE == "GPU":
env.debugPrint("skipping {} since it's hanging CI".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
model_pb = load_file_content('pt-minimal.pt')
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'TORCH', DEVICE, 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.TENSORSET', 'b{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
ret = send_and_disconnect(('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 2, 'a{1}', 'b{1}', 'OUTPUTS', 1, 'c{1}'), con)
env.assertEqual(ret, None)
def test_pytorch_modelscan_scriptscan(env):
if not TEST_PT:
env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
# ensure cleaned DB
# env.flush()
model_pb = load_file_content('pt-minimal.pt')
ret = con.execute_command('AI.MODELSTORE', 'm1{1}', 'TORCH', DEVICE, 'TAG', 'm:v1', 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.MODELSTORE', 'm2{1}', 'TORCH', DEVICE, 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
script = load_file_content('script.txt')
ret = con.execute_command('AI.SCRIPTSTORE', 's1{1}', DEVICE, 'TAG', 's:v1', 'ENTRY_POINTS', 2, 'bar', 'bar_variadic', 'SOURCE', script)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.SCRIPTSTORE', 's2{1}', DEVICE, 'ENTRY_POINTS', 2, 'bar', 'bar_variadic', 'SOURCE', script)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
ret = con.execute_command('AI._MODELSCAN')
env.assertEqual(2, len(ret[0]))
env.assertEqual(2, len(ret[1]))
ret = con.execute_command('AI._SCRIPTSCAN')
env.assertEqual(2, len(ret[0]))
env.assertEqual(2, len(ret[1]))
def test_parallelism():
env = Env(moduleArgs='INTRA_OP_PARALLELISM 1 INTER_OP_PARALLELISM 1')
if not TEST_PT:
env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
model_pb = load_file_content('pt-minimal.pt')
ret = con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
ret = con.execute_command('AI.TENSORSET', 'b{1}', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3)
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'TORCH', DEVICE, 'BLOB', model_pb)
ensureSlaveSynced(con, env)
con.execute_command('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 2, 'a{1}', 'b{1}', 'OUTPUTS', 1, 'c{1}')
ensureSlaveSynced(con, env)
values = con.execute_command('AI.TENSORGET', 'c{1}', 'VALUES')
env.assertEqual(values, [b'4', b'6', b'4', b'6'])
load_time_config = get_info_section(con, 'load_time_configs')
env.assertEqual(load_time_config["ai_inter_op_parallelism"], "1")
env.assertEqual(load_time_config["ai_intra_op_parallelism"], "1")
env = Env(moduleArgs='INTRA_OP_PARALLELISM 2 INTER_OP_PARALLELISM 2')
load_time_config = get_info_section(con, 'load_time_configs')
env.assertEqual(load_time_config["ai_inter_op_parallelism"], "2")
env.assertEqual(load_time_config["ai_intra_op_parallelism"], "2")
def test_modelget_for_tuple_output(env):
if not TEST_PT:
env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
model_pb = load_file_content('pt-minimal-bb.pt')
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'TORCH', DEVICE, 'BLOB', model_pb)
ensureSlaveSynced(con, env)
env.assertEqual(b'OK', ret)
ret = con.execute_command('AI.MODELGET', 'm{1}', 'META')
env.assertEqual(ret[1], b'TORCH')
env.assertEqual(ret[5], b'')
env.assertEqual(ret[7], 0)
env.assertEqual(ret[9], 0)
env.assertEqual(ret[15], 0)
env.assertEqual(len(ret[11]), 2)
env.assertEqual(len(ret[13]), 2)
def test_torch_info(env):
if not TEST_PT:
env.debugPrint("skipping {}".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
backends_info = get_info_section(con, 'backends_info')
env.assertFalse('ai_Torch_version' in backends_info)
model_pb = load_file_content('pt-minimal-bb.pt')
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'TORCH', DEVICE, 'BLOB', model_pb)
backends_info = get_info_section(con, 'backends_info')
env.assertTrue('ai_Torch_version' in backends_info)
| 37.835726
| 163
| 0.635038
| 3,427
| 23,723
| 4.227021
| 0.073534
| 0.117907
| 0.105619
| 0.111487
| 0.893552
| 0.870772
| 0.84378
| 0.831354
| 0.819067
| 0.790487
| 0
| 0.030657
| 0.181891
| 23,723
| 626
| 164
| 37.896166
| 0.715736
| 0.0231
| 0
| 0.723256
| 0
| 0
| 0.217597
| 0.003985
| 0
| 0
| 0
| 0.001597
| 0.306977
| 1
| 0.046512
| false
| 0
| 0.009302
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
40c94a124619f0469f457e0a814580b9bac1d7f7
| 23,214
|
py
|
Python
|
Anomaly_generation/src/generation_model.py
|
msc-acse/acse-9-independent-research-project-FistOfHit
|
c8be3f738190f37392dae7b910b08b4775769af3
|
[
"MIT"
] | null | null | null |
Anomaly_generation/src/generation_model.py
|
msc-acse/acse-9-independent-research-project-FistOfHit
|
c8be3f738190f37392dae7b910b08b4775769af3
|
[
"MIT"
] | null | null | null |
Anomaly_generation/src/generation_model.py
|
msc-acse/acse-9-independent-research-project-FistOfHit
|
c8be3f738190f37392dae7b910b08b4775769af3
|
[
"MIT"
] | null | null | null |
# Hitesh Kumar
# GitHub alias: FistOfHit
# CID: 01058403
#Imports
import generation_support_functions as gsf
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.autograd as autograd
import torch.nn.functional as F
import torch.nn.init as init
import torch.nn as nn
from torch import optim
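# Run on the GPU when one is available, otherwise fall back to the CPU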
device = 'cpu'
if torch.cuda.device_count() > 0 and torch.cuda.is_available():
device = 'cuda'
def init_weights(model, gain):
"""
Initialise weights of model given activation function.
Parameters
----------
model: torch NN model
Model to initialise weights on
gain: Float
Gain (scale factor) for some non-linearity
Returns
-------
None
"""
activ_applied = model.non_linearity
# Go through all parameters
num_layer = 0
for i in list(model.parameters()):
# If this is a weight array
if len(list(i.data.shape)) == 2:
# If non linearity has been applied here or not
if activ_applied[num_layer]:
init.xavier_uniform_(i.data, gain)
else:
init.xavier_uniform_(i.data)
num_layer = min(num_layer+1, len(activ_applied)-1)
# If this is a bias array
else:
i.data.fill_(torch.normal(torch.zeros(1), 0.1).item())
return
def train_gan(D, G, real_loader, seq_length, stride, latent_size, num_epochs,
tag):
"""
Train original GAN model.
Parameters
----------
D: torch nn model
Discriminator model
G: torch nn model
Generator model
real_loader: torch DataLoader
Data loader for real data from dataset
seq_length: Integer
Number of timesteps in each sequence
stride: Integer
Length of gap between consecutive series
latent_size: Integer
Size of latent space to sample from for G
num_epochs: Integer
Number of epochs to train for
tag: Integer
Tag index used for training and evaluation
Returns
-------
None.
"""
# Optimisers
d_optimiser = optim.Adam(D.parameters(), lr=5e-6)
g_optimiser = optim.Adam(G.parameters(), lr=5e-6, weight_decay=0.01)
d_losses = []
g_losses = []
loss_function = nn.BCELoss()
min_jsdiv = float('inf')  # JS divergence is non-negative; start at +inf so the first evaluated model gets saved
# Will detach in training when needed
G.train()
D.train()
for epoch in range(num_epochs):
# Analyse distributions of real and generated data
if epoch % 5 == 0:
stats = gsf.assess_generator(D, G, real_loader, seq_length, stride,
latent_size, tag)
D.train()
G.train()
# Save best model as we go along
if stats[0] < min_jsdiv:
torch.save(D.state_dict(), "./Disciminator.pth")
torch.save(G.state_dict(), "./Generator.pth")
min_jsdiv = stats[0]
tot_disc_loss = 0
tot_gen_loss = 0
for real_data in real_loader:
# -----------------------------
# Train Discriminator
# -----------------------------
d_optimiser.zero_grad()
# Create an equal amount of fake data
real_data = real_data[0].to(device)
# Forward pass for real data score
real_scores = D.forward(real_data).cpu()
real_scores = F.sigmoid(real_scores)
# Real labels with smoothing
real_labels = torch.ones_like(real_scores)
real_labels += torch.zeros_like(real_labels).uniform_(-0.2, 0.2)
# Maximise log(D(x))
real_loss = loss_function(real_scores, real_labels)
# Generate fake data
batch_size = real_data.shape[0]
mean_tensor = torch.zeros(batch_size, seq_length, latent_size)
latent_vectors = torch.normal(mean=mean_tensor, std=1).to(device)
# Forward pass for fake data score
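# .detach() keeps gradients from flowing back into G while D is being trained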
fake_data = G.forward(latent_vectors).detach()
fake_scores = D.forward(fake_data).cpu()
fake_scores = F.sigmoid(fake_scores)
# Fake labels with smoothing
fake_labels = torch.zeros_like(fake_scores)
fake_labels += torch.zeros_like(fake_labels).uniform_(0, 0.3)
# Minimise log(1 - D(G(z)))
fake_loss = loss_function(fake_scores, fake_labels)
d_loss = real_loss + fake_loss
d_loss.backward()
d_optimiser.step()
# -----------------------------
# Train Generator
# -----------------------------
g_optimiser.zero_grad()
# Generate fake data
batch_size = real_data.shape[0]  # real_data was unpacked to a tensor above, so index the batch dimension directly
mean_tensor = torch.zeros(batch_size, seq_length, latent_size)
latent_vectors = torch.normal(mean=mean_tensor, std=1).to(device)
# Forward pass for fake data score
fake_data = G.forward(latent_vectors)
fake_scores = D.forward(fake_data).cpu()
fake_scores = F.sigmoid(fake_scores)
# Maximise Log(D(G(z)))
fake_labels = torch.ones_like(fake_scores)
gen_loss = loss_function(fake_scores, fake_labels)
gen_loss.backward()
g_optimiser.step()
# Track running values
tot_disc_loss += d_loss.item()
tot_gen_loss += gen_loss.item()
g_losses.append(tot_gen_loss)
d_losses.append(tot_disc_loss)
print("Epoch: %d, Discriminator loss: %.4f (Real: %.4f, Fake: %.4f)" %
(epoch+1, tot_disc_loss, real_loss.item(), fake_loss.item()))
print("Epoch: %d, Generator loss: %.4f \n" % (epoch+1, tot_gen_loss))
# Plot losses over time
fig, ax = plt.subplots(figsize=(15, 8))
x_axis = np.arange(len(d_losses))
plt.plot(x_axis, d_losses, 'b')
plt.title("Discriminator Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.show()
fig, ax = plt.subplots(figsize=(15, 8))
x_axis = np.arange(len(g_losses))
plt.plot(x_axis, g_losses, 'r')
plt.title("Generator Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.show()
return
def train_lsgan(D, G, real_loader, seq_length, stride, latent_size, num_epochs,
tag):
"""
Train Least-Squares GAN model.
Parameters
----------
D: torch nn model
Discriminator model
G: torch nn model
Generator model
real_loader: torch DataLoader
Data loader for real data from dataset
seq_length: Integer
Number of timesteps in each sequence
stride: Integer
Length of gap between consecutive series
latent_size: Integer
Size of latent space to sample from for G
num_epochs: Integer
Number of epochs to train for
tag: Integer
Tag index used for training and evaluation
Returns
-------
None.
"""
# Optimisers
d_optimiser = optim.Adam(D.parameters(), lr=5e-6)
g_optimiser = optim.Adam(G.parameters(), lr=5e-6, weight_decay=0.01)
d_losses = []
g_losses = []
min_jsdiv = float('inf')  # JS divergence is non-negative; start at +inf so the first evaluated model gets saved
# Will detach in training when needed
G.train()
D.train()
for epoch in range(num_epochs):
# Analyse distributions of real and generated data
if epoch % 5 == 0:
stats = gsf.assess_generator(D, G, real_loader, seq_length, stride,
latent_size, tag)
D.train()
G.train()
# Save best model as we go along
if stats[0] < min_jsdiv:
torch.save(D.state_dict(), "./Disciminator.pth")
torch.save(G.state_dict(), "./Generator.pth")
min_jsdiv = stats[0]
tot_disc_loss = 0
tot_gen_loss = 0
for real_data in real_loader:
# -----------------------------
# Train Discriminator
# -----------------------------
d_optimiser.zero_grad()
# Create an equal amount of fake data
real_data = real_data[0].to(device)
# Forward pass for real data score
real_scores = D.forward(real_data).cpu()
# Minimise E(D(G(z))^2)
real_loss = torch.mean((real_scores - 1)**2)
# Generate fake data
batch_size = real_data.shape[0]
mean_tensor = torch.zeros(batch_size, seq_length, latent_size)
latent_vectors = torch.normal(mean=mean_tensor, std=1).to(device)
# Forward pass for fake data score
fake_data = G.forward(latent_vectors).detach()
fake_scores = D.forward(fake_data).cpu()
# Minimise E(D(G(z))^2)
fake_loss = torch.mean((fake_scores)**2)
d_loss = real_loss + fake_loss
d_loss.backward()
d_optimiser.step()
# -----------------------------
# Train Generator
# -----------------------------
g_optimiser.zero_grad()
# Generate fake data
batch_size = real_data.shape[0]  # real_data was unpacked to a tensor above, so index the batch dimension directly
mean_tensor = torch.zeros(batch_size, seq_length, latent_size)
latent_vectors = torch.normal(mean=mean_tensor, std=1).to(device)
# Forward pass for fake data score
fake_data = G.forward(latent_vectors)
fake_score = D.forward(fake_data).cpu()
# Minimise E((D(G(z)) - 1)^2)
gen_loss = torch.mean((fake_score - 1)**2)
gen_loss.backward()
g_optimiser.step()
# Track running values
tot_disc_loss += d_loss.item()
tot_gen_loss += gen_loss.item()
g_losses.append(tot_gen_loss)
d_losses.append(tot_disc_loss)
print("Epoch: %d, Discriminator loss: %.4f (Real: %.4f, Fake: %.4f)" %
(epoch+1, tot_disc_loss, real_loss.item(), fake_loss.item()))
print("Epoch: %d, Generator loss: %.4f \n" % (epoch+1, tot_gen_loss))
# Plot losses over time
fig, ax = plt.subplots(figsize=(15, 8))
x_axis = np.arange(len(d_losses))
plt.plot(x_axis, d_losses, 'b')
plt.title("Discriminator Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.show()
fig, ax = plt.subplots(figsize=(15, 8))
x_axis = np.arange(len(g_losses))
plt.plot(x_axis, g_losses, 'r')
plt.title("Generator Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.show()
return
def train_wgan(D, G, real_loader, seq_length, stride, latent_size,
num_cycles, tag, weight_limit=0.01):
"""
Train Wasserstein-GAN model.
Parameters
----------
D: torch nn model
Discriminator model
G: torch nn model
Generator model
real_loader: torch DataLoader
Data loader for real data from dataset
seq_length: Integer
Length of each sequence in leadup
stride: Integer
Length of gap between consecutive series
latent_size: Integer
Size of latent space vectors
num_cycles: Integer
Number of cycles to train
tag: Integer
Tag index used for training and evaluation
weight_limit: Float (default=0.01)
Symmetric magnitude limit at which weights are clipped
Returns
-------
None.
"""
# Optimisers
d_optimiser = optim.RMSprop(D.parameters(), lr=5e-5)
g_optimiser = optim.RMSprop(G.parameters(), lr=1e-5)
d_losses = []
g_losses = []
min_jsd = float('inf')  # JS divergence is non-negative; start at +inf so the first evaluated model gets saved
# Will detach in training when needed
G.train()
D.train()
for cycle in range(num_cycles):
# Analyse distributions of real and generated data
if cycle % 5 == 0:
stats = gsf.assess_generator(D, G, real_loader, seq_length, stride,
latent_size, tag)
D.train()
G.train()
# Save best model as we go along
if stats[0] < min_jsd:
torch.save(D.state_dict(), "./Disciminator.pth")
torch.save(G.state_dict(), "./Generator.pth")
min_jsd = stats[0]
print("\n")
# Train both for one cycle
print("Cycle: %d" % (cycle+1))
# -----------------------------
# Train Discriminator
# -----------------------------
print("Training Discriminator")
for epoch in range(5):
# Train on equal parts separate real and fake data
tot_real_out = 0
tot_fake_out = 0
tot_disc_loss = 0
for real_data in real_loader:
d_optimiser.zero_grad()
# Create an equal amount of fake data
real_data = real_data[0].to(device)
# Forward pass for real data score
real_score = D.forward(real_data)
# Maximise E(D(x))
real_loss = torch.mean(real_score)
# Generate fake data
batch_size = real_data.shape[0]
mean_tensor = torch.zeros(batch_size, seq_length, latent_size)
latent_vectors = torch.normal(mean=mean_tensor, std=1).to(device)
# Forward pass for fake data score
fake_data = G.forward(latent_vectors).detach()
fake_score = D.forward(fake_data)
# Minimise E(D(G(z)))
fake_loss = torch.mean(fake_score)
# Accumulate loss and backward
disc_loss = -torch.mean(real_score) + torch.mean(fake_score)
disc_loss.backward()
d_optimiser.step()
# Weight clipping to enforce Lipschitz condition (weak)
for param in D.parameters():
param.data.clamp_(-weight_limit, weight_limit)
# Track running values
tot_real_out += real_loss.item()
tot_fake_out += fake_loss.item()
tot_disc_loss += disc_loss.cpu().item()
d_losses.append(tot_disc_loss)
print("Epoch: %d, Discriminator loss: %.4f" %
(epoch+1, tot_disc_loss))
print("Real out: %.4f, Fake out: %.4f" %
(tot_real_out, tot_fake_out))
# -----------------------------
# Train Generator
# -----------------------------
print("Training Generator")
tot_gen_loss = 0
for real_data in real_loader:
g_optimiser.zero_grad()
# Generate fake data
batch_size = real_data[0].shape[0]
mean_tensor = torch.zeros(batch_size, seq_length, latent_size)
latent_vectors = torch.normal(mean=mean_tensor, std=1).to(device)
# Forward pass for fake data score
fake_data = G.forward(latent_vectors)
fake_score = D.forward(fake_data)
# Loss for fake data - Maximise E(D(G(z)))
gen_loss = -torch.mean(fake_score)
gen_loss.backward()
g_optimiser.step()
tot_gen_loss += gen_loss.cpu().item()
g_losses.append(tot_gen_loss)
print("Epoch: %d, Generator loss: %.4f" % (epoch+1, tot_gen_loss))
# Plot losses over time
fig, ax = plt.subplots(figsize=(15, 8))
x_axis = np.arange(len(d_losses))
plt.plot(x_axis, d_losses, 'b')
plt.title("Discriminator Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.show()
fig, ax = plt.subplots(figsize=(15, 8))
x_axis = np.arange(len(g_losses))
plt.plot(x_axis, g_losses, 'r')
plt.title("Generator Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.show()
return
def train_wgan_gp(D, G, real_loader, seq_length, stride, latent_size,
num_cycles, tag):
"""
Train "Wasserstein-GAN with Gradient penalty" model.
Parameters
----------
D: torch nn model
Discriminator model
G: torch nn model
Generator model
real_loader: torch DataLoader
Data loader for real data from dataset
seq_length: Integer
Length of each sequence in leadup
stride: Integer
Length of gap between consecutive series
latent_size: Integer
Size of latent space vectors
num_cycles: Integer
Number of cycles to train
tag: Integer
Tag index used for training and evaluation
Returns
-------
None.
"""
# Optimisers
d_optimiser = optim.RMSprop(D.parameters(), lr=5e-5)
g_optimiser = optim.RMSprop(G.parameters(), lr=1e-5)
d_losses = []
g_losses = []
min_jsd = float('inf')  # JS divergence is non-negative; start at +inf so the first evaluated model gets saved
# Will detach in training when needed
G.train()
D.train()
for cycle in range(num_cycles):
# Analyse distributions of real and generated data
if cycle % 5 == 0:
stats = gsf.assess_generator(D, G, real_loader, seq_length, stride,
latent_size, tag)
D.train()
G.train()
# Save best model as we go along
if stats[0] < min_jsd:
torch.save(D.state_dict(), "./Disciminator.pth")
torch.save(G.state_dict(), "./Generator.pth")
min_jsd = stats[0]
print("\n")
# Train both for one cycle
print("Cycle: %d" % (cycle+1))
# -----------------------------
# Train Discriminator
# -----------------------------
print("Training Discriminator")
for epoch in range(5):
# Train on equal parts separate real and fake data
tot_real_out = 0
tot_fake_out = 0
tot_gp_loss = 0
tot_disc_loss = 0
for real_data in real_loader:
d_optimiser.zero_grad()
# Create an equal amount of fake data
real_data = real_data[0].to(device)
# Forward pass for real data score
real_score = D.forward(real_data)
# Maximise E(D(x))
real_loss = torch.mean(real_score)
# Generate fake data
batch_size = real_data.shape[0]
mean_tensor = torch.zeros(batch_size, seq_length, latent_size)
latent_vectors = torch.normal(mean=mean_tensor, std=1).to(device)
# Forward pass for fake data score
fake_data = G.forward(latent_vectors).detach()
fake_score = D.forward(fake_data)
# Minimise E(D(G(z)))
fake_loss = torch.mean(fake_score)
# Penalising gradient norms to enforce Lipschitz condition
t = torch.rand(1).item()
interp_in = (t*real_data + (1 - t)*fake_data).to(device).requires_grad_(True)
interp_out = D(interp_in)
grad_out = torch.ones_like(interp_out, requires_grad=True).to(device)
gradients = autograd.grad(outputs=interp_out, inputs=interp_in,
grad_outputs=grad_out, create_graph=True,
retain_graph=True, only_inputs=True)[0]
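# The factor 10 below is the gradient-penalty coefficient (lambda) used in the WGAN-GP paper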
grad_penalty = 10*(torch.mean((gradients.norm(2, dim=1) - 1)**2))
# Accumulate loss and backward
disc_loss = -torch.mean(real_score) + torch.mean(fake_score) \
+ grad_penalty
disc_loss.backward()
d_optimiser.step()
# Track running values
tot_real_out += real_loss.item()
tot_fake_out += fake_loss.item()
tot_gp_loss += grad_penalty.item()
tot_disc_loss += disc_loss.cpu().item()
d_losses.append(tot_disc_loss)
print("Epoch: %d, Discriminator loss: %.4f" %
(epoch+1, tot_disc_loss))
print("Real out: %.4f, Fake out: %.4f, GP_loss: %.4f" %
(tot_real_out, tot_fake_out, tot_gp_loss))
# -----------------------------
# Train Generator
# -----------------------------
print("Training Generator")
tot_gen_loss = 0
for real_data in real_loader:
g_optimiser.zero_grad()
# Generate fake data
batch_size = real_data[0].shape[0]
mean_tensor = torch.zeros(batch_size, seq_length, latent_size)
latent_vectors = torch.normal(mean=mean_tensor, std=1).to(device)
# Forward pass for fake data score
fake_data = G.forward(latent_vectors)
fake_score = D.forward(fake_data)
# Loss for fake data - Maximise E(D(G(z)))
gen_loss = -torch.mean(fake_score)
gen_loss.backward()
g_optimiser.step()
tot_gen_loss += gen_loss.cpu().item()
g_losses.append(tot_gen_loss)
print("Epoch: %d, Generator loss: %.4f" % (epoch+1, tot_gen_loss))
# Plot losses over time
fig, ax = plt.subplots(figsize=(15, 8))
x_axis = np.arange(len(d_losses))
plt.plot(x_axis, d_losses, 'b')
plt.title("Discriminator Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.show()
fig, ax = plt.subplots(figsize=(15, 8))
x_axis = np.arange(len(g_losses))
plt.plot(x_axis, g_losses, 'r')
plt.title("Generator Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.show()
return
class Discriminator(nn.Module):
"""GAN Discriminator class."""
def __init__(self, activation_function=nn.LeakyReLU()):
super(Discriminator, self).__init__()
self.activ = activation_function
# Linear classifier layers
self.classifier_1 = nn.Linear(1, 100)
self.classifier_2 = nn.Linear(100, 300)
self.classifier_3 = nn.Linear(300, 100)
self.classifier_4 = nn.Linear(100, 1)
# Whether or not non-linearity is applied
self.non_linearity = [1, 1, 1, 0]
# Full forward pass
def forward(self, time_series):
x = self.activ(self.classifier_1(time_series))
x = self.activ(self.classifier_2(x))
x = self.activ(self.classifier_3(x))
score = self.classifier_4(x)
return score
class Generator(nn.Module):
"""GAN Generator class."""
def __init__(self, latent_size,
activation_function=nn.LeakyReLU()):
super(Generator, self).__init__()
self.activ = activation_function
# Linear generation layers
self.upscaler_1 = nn.Linear(latent_size, 30)
self.upscaler_2 = nn.Linear(30, 60)
self.output_1 = nn.Linear(60, 30)
self.output_2 = nn.Linear(30, 1)
# Whether or not non-linearity is applied
self.non_linearity = [1, 1, 1, 0]
# Full forward pass
def forward(self, latent_noise):
# Scale up to feature space size
x = self.activ(self.upscaler_1(latent_noise))
x = self.activ(self.upscaler_2(x))
x = self.activ(self.output_1(x))
fake = self.output_2(x)
return fake
| 29.0175
| 93
| 0.558198
| 2,883
| 23,214
| 4.301769
| 0.099549
| 0.026447
| 0.014191
| 0.018384
| 0.814627
| 0.797291
| 0.783341
| 0.762055
| 0.760926
| 0.760926
| 0
| 0.015209
| 0.323081
| 23,214
| 799
| 94
| 29.053817
| 0.774023
| 0.131042
| 0
| 0.76087
| 0
| 0.005435
| 0.049813
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.024457
| null | null | 0.048913
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
40f348c21856010e93c3ca2bd4e7e2571e2486e0
| 195
|
py
|
Python
|
www/views/__init__.py
|
ki-tools/sls_ki_synapse_admin_py
|
d9483d01000b61c4e8d129bdc06497ae1a27484b
|
[
"Apache-2.0"
] | null | null | null |
www/views/__init__.py
|
ki-tools/sls_ki_synapse_admin_py
|
d9483d01000b61c4e8d129bdc06497ae1a27484b
|
[
"Apache-2.0"
] | null | null | null |
www/views/__init__.py
|
ki-tools/sls_ki_synapse_admin_py
|
d9483d01000b61c4e8d129bdc06497ae1a27484b
|
[
"Apache-2.0"
] | null | null | null |
from .views import home
from .login import views
from .synapse_space import views
from .synapse_space.daa import views
from .synapse_space.dca import views
from .synapse_space.basic import views
| 27.857143
| 38
| 0.830769
| 31
| 195
| 5.096774
| 0.322581
| 0.348101
| 0.379747
| 0.556962
| 0.683544
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123077
| 195
| 6
| 39
| 32.5
| 0.923977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9097621e3c6822a86cb0b458ef0353703da57d05
| 942
|
py
|
Python
|
apps/blog/api/fields.py
|
dryprojects/MyBlog
|
ec04ba2bc658e96cddeb1d4766047ca8e89ff656
|
[
"BSD-3-Clause"
] | 2
|
2021-08-17T13:29:21.000Z
|
2021-09-04T05:00:01.000Z
|
apps/blog/api/fields.py
|
dryprojects/MyBlog
|
ec04ba2bc658e96cddeb1d4766047ca8e89ff656
|
[
"BSD-3-Clause"
] | 1
|
2020-07-16T11:22:32.000Z
|
2020-07-16T11:22:32.000Z
|
apps/blog/api/fields.py
|
dryprojects/MyBlog
|
ec04ba2bc658e96cddeb1d4766047ca8e89ff656
|
[
"BSD-3-Clause"
] | 1
|
2020-09-18T10:41:59.000Z
|
2020-09-18T10:41:59.000Z
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
@author: nico
@file: fields.py
@time: 2018/08/03
"""
from rest_framework import serializers
class CategoryParentField(serializers.PrimaryKeyRelatedField):
def get_queryset(self):
queryset = super().get_queryset()
return queryset if not queryset else queryset.filter(author=self.context['request'].user)
class PostParentField(serializers.HyperlinkedRelatedField):
def get_queryset(self):
# Return only the requesting user's own posts here, so that a parent post cannot be chosen from someone else's posts
queryset = super().get_queryset()
return queryset if not queryset else queryset.filter(author=self.context['request'].user)
class ResourcePostField(serializers.HyperlinkedRelatedField):
def get_queryset(self):
# Return only the requesting user's own posts here, so that a parent post cannot be chosen from someone else's posts
queryset = super().get_queryset()
return queryset if not queryset else queryset.filter(author=self.context['request'].user)
| 32.482759
| 97
| 0.719745
| 100
| 942
| 6.71
| 0.42
| 0.098361
| 0.062593
| 0.080477
| 0.701937
| 0.701937
| 0.701937
| 0.701937
| 0.701937
| 0.701937
| 0
| 0.011553
| 0.173036
| 942
| 29
| 98
| 32.482759
| 0.849807
| 0.184713
| 0
| 0.692308
| 0
| 0
| 0.027741
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.230769
| false
| 0
| 0.076923
| 0
| 0.769231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
90f35998f81c1d115a29c1d6f63c2e539f38ba5c
| 237
|
py
|
Python
|
pyriemann_qiskit/datasets/__init__.py
|
toncho11/pyRiemann-qiskit
|
93c11801127ef8d80c9e94ea0f31549a6b863238
|
[
"BSD-3-Clause"
] | 1
|
2022-02-21T20:13:16.000Z
|
2022-02-21T20:13:16.000Z
|
pyriemann_qiskit/datasets/__init__.py
|
toncho11/pyRiemann-qiskit
|
93c11801127ef8d80c9e94ea0f31549a6b863238
|
[
"BSD-3-Clause"
] | null | null | null |
pyriemann_qiskit/datasets/__init__.py
|
toncho11/pyRiemann-qiskit
|
93c11801127ef8d80c9e94ea0f31549a6b863238
|
[
"BSD-3-Clause"
] | null | null | null |
from .utils import (get_mne_sample,
get_linearly_separable_dataset,
get_qiskit_dataset)
__all__ = ["get_mne_sample",
"get_linearly_separable_dataset",
"get_qiskit_dataset"]
| 26.333333
| 51
| 0.616034
| 24
| 237
| 5.333333
| 0.458333
| 0.09375
| 0.1875
| 0.234375
| 0.859375
| 0.859375
| 0.859375
| 0.859375
| 0.859375
| 0.859375
| 0
| 0
| 0.316456
| 237
| 8
| 52
| 29.625
| 0.790123
| 0
| 0
| 0
| 0
| 0
| 0.261603
| 0.126582
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
90f38f8b193e11ecd0e22533385eccf1c8eda9c2
| 32
|
py
|
Python
|
Modules/Verify/Verify/__init__.py
|
SMCEBI-didactics/sinwo-group1
|
4653567d3d7b73d68ae00795d582454e4dc1a9b4
|
[
"MIT"
] | 5
|
2022-01-18T06:40:35.000Z
|
2022-01-24T20:41:51.000Z
|
Modules/Verify/Verify/__init__.py
|
SMCEBI-didactics/sinwo-group1
|
4653567d3d7b73d68ae00795d582454e4dc1a9b4
|
[
"MIT"
] | 5
|
2022-01-26T20:08:02.000Z
|
2022-01-27T19:25:56.000Z
|
Modules/Verify/Verify/__init__.py
|
SMCEBI-didactics/sinwo-group1
|
4653567d3d7b73d68ae00795d582454e4dc1a9b4
|
[
"MIT"
] | null | null | null |
from .verify import hash_passwd
| 16
| 31
| 0.84375
| 5
| 32
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
465b6f8b7e270ded98b9ad4da70211e7976c7811
| 192
|
py
|
Python
|
learning/__init__.py
|
shuoli90/PAC-pred-set
|
430d9dcd0b9f444f9707cf803c5f61c318794f92
|
[
"Apache-2.0"
] | null | null | null |
learning/__init__.py
|
shuoli90/PAC-pred-set
|
430d9dcd0b9f444f9707cf803c5f61c318794f92
|
[
"Apache-2.0"
] | null | null | null |
learning/__init__.py
|
shuoli90/PAC-pred-set
|
430d9dcd0b9f444f9707cf803c5f61c318794f92
|
[
"Apache-2.0"
] | 1
|
2021-07-22T18:38:25.000Z
|
2021-07-22T18:38:25.000Z
|
from learning.loss import *
from learning.util import *
from learning.base import BaseLearner
from learning.classification import ClsLearner
from learning.pred_set import PredSetConstructor
| 24
| 48
| 0.848958
| 24
| 192
| 6.75
| 0.5
| 0.37037
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114583
| 192
| 7
| 49
| 27.428571
| 0.952941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d3b5dd8142af57d69505281036858b1b189e165a
| 38,881
|
py
|
Python
|
scripts/stepmotor/steppingmotor/t6110.py
|
gw-vis/pcas-controller
|
a7479195831b29f9e21806161553eee90c46a96b
|
[
"MIT"
] | null | null | null |
scripts/stepmotor/steppingmotor/t6110.py
|
gw-vis/pcas-controller
|
a7479195831b29f9e21806161553eee90c46a96b
|
[
"MIT"
] | 3
|
2020-09-30T03:14:38.000Z
|
2020-10-14T23:01:53.000Z
|
scripts/stepmotor/steppingmotor/t6110.py
|
gw-vis/pcas-controller
|
a7479195831b29f9e21806161553eee90c46a96b
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
"""
Created on Mar 7, 2012
@author: Filip
"""
#import serial
import socket
import struct
import time
from numpy import log2, sqrt
class MotorError(Exception):
pass
class Trinamic_control6110():
class ReceiveData():
def __init__(self, adr=0, status=0, cmd=0, value=0):
self.status = status
self.commandNumber = cmd
self.value = value
self.moduleAddress = adr
def __init__(self):
self.commandDict = {'ROR':1, 'ROL':2, 'MST':3, 'MVP':4, 'SAP':5, 'GAP':6,
'STAP':7, 'RSAP':8, 'SGP':9, 'GGP':10, 'RFS':13, 'SIO':14, 'GIO':15, 'WAIT':27, 'STOP':28,
'SCO':30, 'GCO':31, 'CCO':32, 'VER':136, 'RST':255}
self.position = 0
self.speed = 0.0
self.timeout = 2.
self.writeTimeout = 0.0
self.connected = None
self.port = None
self.portName = None
self.errorDict = {1:'Wrong checksum', 2:'Invalid command', 3:'Wrong type', 4:'Invalid value',
5:'Configuration EEPROM locked', 6:'Command not available'}
self.maxModuleCurrent = 1.6
# def connectRS485(self, port, baudrate=9600):
# try:
# self.port = serial.Serial(port, baudrate, timeout=self.timeout, writeTimeout=self.writeTimeout)
# self.connected = 'RS485'
# self.portName = port
# self.baudrate = baudrate
# except Exception, e:
# print 'Could not connect to RS485', e
def connectTCP(self, ipadr, port):
self.port = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.port.connect((ipadr, port))
self.port.settimeout(self.timeout)
self.connected = 'TCP'
self.portName = [ipadr, port]
except Exception, e:
print 'Could not connect to TCP', e
def close(self):
if self.port != None:
self.port.close()
self.connected = None
def sendCommand(self, cmd, type, motor, value):
adr = 1
try:
command = self.commandDict[cmd]
except KeyError:
return 'Wrong command'
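# TMCL datagram: address, command, type, motor and a big-endian 32-bit value, followed by a checksum byte (sum of the first 8 bytes mod 256)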
tmp = struct.pack('BBBBi', adr, command, type, motor, value)
checksum = sum(struct.unpack('BBBBBBBB', tmp)) % 256
TxBuffer = struct.pack('>BBBBiB', adr, command, type, motor, value, checksum)
if self.connected == 'RS485':
if self.port.inWaiting() > 0:
self.port.flushInput()
self.port.flushOutput()
self.port.write(TxBuffer)
elif self.connected == 'TCP':
self.port.send(TxBuffer)
return TxBuffer
def receiveData(self):
if self.connected == 'RS485':
RxBuffer = self.port.read(9)
if self.port.inWaiting() > 0:
self.port.flushInput()
elif self.connected == 'TCP':
RxBuffer = self.port.recv(9)
else:
RxBuffer = ''
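# A valid TMCL reply is exactly 9 bytes; anything else triggers a reconnect attempt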
if RxBuffer.__len__() == 9:
data = struct.unpack('>BBBBiB', RxBuffer)
rData = self.ReceiveData(data[1], data[2], data[3], data[4])
else:
rData = self.ReceiveData(None, None, None, None)
self.reconnect()
return rData
def reconnect(self):
print 'Reconnecting...'
if self.connected == 'RS485':
self.close()
self.connectRS485(self.portName, self.baudrate)
elif self.connected == 'TCP':
self.close()
self.connectTCP(self.portName[0], self.portName[1])
print 'Testing connection:'
self.sendCommand('GAP', 1, 0, 0)
if self.connected == 'RS485':
RxBuffer = self.port.read(9)
elif self.connected == 'TCP':
RxBuffer = self.port.recv(9)
else:
RxBuffer = ''
if RxBuffer.__len__() != 9:
status = 0
self.close()
raise MotorError('Reconnection failed.')
else:
status = 1
print '...ok'
return status
def reset(self):
cmd = 'RST' # Reset
type = 0
value = 1234 # TMCL requires this magic value to confirm a reset
self.sendCommand(cmd, type, 0, value)
data = self.receiveData()
# print 'Status',data.status
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.value
def setBaudrate(self, baudrate):
cmd = 'SGP' # Set global parameter
type = 65 # baudrate
if 0 <= baudrate <= 115200:
# if it is a number less than 8 assume it is an index to a list of baudrates
if baudrate < 8:
value = int(baudrate)
# Or else it is the actual baudrate
else:
value = int(baudrate / 9600)
# Snap to index in list of baudrates
if value < 2:
# Baudrate 9600 has index 0. We lose 14400, but don't care
value = 0
self.sendCommand(cmd, type, 0, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
self.reset()
time.sleep(0.5)
port = self.port.getPort()
self.close()
if value > 1:
actualBaudrate = value * 9600
else:
actualBaudrate = (1 + 0.5 * value) * 9600
print 'New baudrate:', actualBaudrate
self.connectRS485(port, actualBaudrate)
else:
if baudrate < 0:
raise MotorError('Baud rate negative')
else:
raise MotorError('Baud rate too high (>115200)')
pass
def getBaudrate(self):
cmd = 'GGP' # Get global parameter
type = 65 # baudrate
value = 0 # Don't care
self.sendCommand(cmd, type, 0, value)
data = self.receiveData()
# print 'Status',data.status
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
if data.value > 1:
baudrate = data.value * 9600
else:
baudrate = 9600 * (1 + 0.5 * data.value)
return baudrate
def setTargetPosition(self, pos, motor=0):
cmd = 'MVP' # Move to position
type = 0 # Absolute
value = int(pos)
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
def setMaxCurrent(self, current, motor=0):
cmd = 'SAP' # Set axis parameter
type = 6 # Maximum current (peak)
value = int(current / self.maxModuleCurrent * 255.0) # Value in % to max module current
# (scaled to 255)
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
cmd = 'SAP' # Set axis parameter
type = 7 # Standby current
value = int(current / self.maxModuleCurrent * 255.0 * 0.1) # Value in % to max module current
# (scaled to 255)
# Standby current set to 10% of the drive current
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.status
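# Worked example of the current scaling above. maxModuleCurrent = 1.1 A is
# an illustrative assumption only -- the real value is set elsewhere:
#   peak:    int(0.3 / 1.1 * 255.0)       = 69
#   standby: int(0.3 / 1.1 * 255.0 * 0.1) = 6   (10% of the drive current)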
def getMaxCurrent(self, motor=0):
cmd = 'GAP' # Get axis parameter
type = 6 # Maximum current (peak)
value = 0 # Don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
# print 'Status',data.status
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
current = data.value * self.maxModuleCurrent / 255.0
return current
def setIdleCurrent(self, idleCurrent, motor=0):
cmd = 'SAP' # Set axis parameter
type = 7 # Standby current
value = int(idleCurrent / self.maxModuleCurrent * 255.0) # Value in % to max module current
# (scaled to 255)
# Standby current set to idleCurrent
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.status
def getPulseDivisor(self, motor=0):
cmd = 'GAP' # Get axis parameter
type = 154 # Pulse divisor
value = 0 # Don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.value
def setPulseDivisor(self, pd, motor=0):
""" Number of pulse division per step.
Should be 0-13"""
cmd = 'SAP' # Set axis parameter
type = 154 # Pulse divisor
value = int(pd) # Pulse divisor value (0-13)
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
def getMicrostepResolution(self, motor=0):
cmd = 'GAP' # Get axis parameter
type = 140 # Microstep resolution
value = 0 # Don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
# print 'Status',data.status
# print 'Value:',data.value
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
res = 2 ** data.value
return res
def setMicrostepResolution(self, res, motor=0):
""" Number of microsteps per full step.
Should be 1,2,4,8,16,32 or 64"""
cmd = 'SAP' # Set axis parameter
type = 140 # Microstep resolution
value = int(log2(res)) # Microstep resolution
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
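# Worked example: axis parameter 140 stores log2 of the resolution, so the
# two microstep methods are inverses of each other:
#   setMicrostepResolution(16) writes int(log2(16)) = 4
#   getMicrostepResolution() then returns 2 ** 4 = 16
# res must be a power of two (1, 2, 4, ..., 64) for the round trip to hold.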
def getActualPosition(self, motor=0):
cmd = 'GAP' # Get axis parameter
type = 1 # Actual position
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.value
def stop(self, motor=0):
cmd = 'MST' # Motor stop
type = 0 # don't care
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
def getTargetSpeed(self, motor=0):
cmd = 'GAP' # Get axis parameter
type = 4 # Maximum positioning speed (used here as the target speed)
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.value
def setTargetSpeed(self, speed, motor=0):
cmd = 'SAP' # Set axis parameter
type = 4 # Target speed (max pos speed)
value = int(speed) # Speed
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
def getActualSpeed(self, motor=0):
cmd = 'GAP' # Get axis parameter
type = 3 # Actual speed
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.value
def getTargetPosition(self, motor=0):
cmd = 'GAP' # Get axis parameter
type = 0 # Target position
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.value
def getTargetPositionReached(self, motor=0):
cmd = 'GAP' # Get axis parameter
type = 8 # Target position reached flag
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.value
def setLimitSwitchPolarity(self, polarity):
'''The TMCM6110 sets the limit switch polarity (normally open / normally closed)
in sets of three motors: motors 0-2 are controlled by bit 0 of global
parameter 79, motors 3-5 by bit 1. It would be confusing to configure only
half of the controller, so this command sets the polarity of all six motors.
'''
cmd = 'SGP' # Set global parameter
type = 79 # Limit polarity is controlled by global parameter 79, selected by parameter "type"
motor = 0 # Bank 0
if polarity == 0:
value = 3 # Invert all outputs
else:
value = 0
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
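# Note on the bit logic above: global parameter 79 is a bit mask, so
# value 3 (0b11) sets both bit 0 (motors 0-2) and bit 1 (motors 3-5),
# inverting all six inputs, while value 0 restores the default polarity.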
def getRightLimitSwitch(self, motor=0):
cmd = 'GAP' # Get axis parameter
type = 10 # Right limit switch state
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.value
def getRightLimitSwitchEnabled(self, motor=0):
cmd = 'GAP' # Get axis parameter
type = 12 # Right limit switch enabled
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
if data.value == 0: # If return value = 0, the limit switch is enabled
return True
else:
return False
def setRightLimitSwitchEnable(self, enable, motor=0):
cmd = 'SAP' # Set axis parameter
type = 12 # Right limit switch enabled
if enable == True:
value = 0 # If 0, switch is enabled
else:
value = 1
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.value
def getLeftLimitSwitch(self, motor=0):
cmd = 'GAP' # Get axis parameter
type = 11 # Left limit switch state
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.value
def getLeftLimitSwitchEnabled(self, motor=0):
cmd = 'GAP' # Get axis parameter
type = 13 # Left limit switch enabled
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
if data.value == 0:
return True
else:
return False
def setLeftLimitSwitchEnable(self, enable, motor=0):
cmd = 'SAP' # Set axis parameter
type = 13 # Left limit switch enabled
if enable == True:
value = 0 # If 0, switch is enabled
else:
value = 1
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.value
def setupMotor(self, motor=0):
self.setMaxCurrent(0.3, motor)
self.setMicrostepResolution(1, motor)
cmd = 'SAP' # Set axis parameter
type = 12 # Right limit switch disable
value = 1
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
if data.status == 100:
cmd = 'SAP' # Set axis parameter
type = 13 # Left limit switch disable
value = 1
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
if data.status == 100:
cmd = 'SAP' # Set axis parameter
type = 149 # Soft stop flag (stop immediately at limit switch)
value = 0
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.status
def definePosition(self, pos, motor=0):
self.stop(motor) # Needed to prevent the motor from moving when setting position
cmd = 'SAP' # Set axis parameter
type = 1 # Set actual position
value = pos
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
cmd = 'SAP' # Set axis parameter
type = 0 # Set target position
value = pos
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
def setZeroPosition(self, motor=0):
self.stop(motor) # Needed to prevent the motor from moving when setting position
cmd = 'SAP' # Set axis parameter
type = 1 # Set actual position
value = 0
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
print 'setZeroPosition, motor ', motor, ' return ', data.status
status = data.status
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
if data.status == 100:
status = self.setTargetPosition(0, motor)
return status
def getRampDivisor(self, motor=0):
cmd = 'GAP' # Get axis parameter
type = 153 # Ramp divisor
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.value
def setRampDivisor(self, rampDivisor, motor=0):
cmd = 'SAP' # Set axis parameter
type = 153 # Ramp divisor
if rampDivisor < 0:
raise MotorError('rampDivisor negative')
elif rampDivisor > 13:
raise MotorError('rampDivisor too high (>13)')
value = int(rampDivisor)
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
def getAcceleration(self, motor=0):
cmd = 'GAP' # Get axis parameter
type = 5 # Max acceleration
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.value
def setAcceleration(self, acceleration, motor=0):
cmd = 'SAP' # Set axis parameter
type = 5 # Max acceleration
value = acceleration # Max acceleration value
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
def getFirmwareVersion(self):
cmd = 'VER' # Firmware version
type = 0 # return string
value = 0 # don't care
motor = 0
self.sendCommand(cmd, type, motor, value)
RxBuffer = self.port.read(9)
return RxBuffer[1:]
def groupMotors(self, motorList, groupIndex):
cmd = 'SAP' # Set axis parameter
type = 213 # Group index
value = groupIndex
for motor in motorList:
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
def getPMul(self, motor=0):
cmd = 'GAP' # Get axis parameter
type = 146 # PMul (position regulator P multiplier)
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.value
def getPDiv(self, motor=0):
cmd = 'GAP' # Get axis parameter
type = 137 # PDiv (position regulator P divisor)
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.value
def startReferenceSearch(self, motor=0):
# Make sure limit switch is enabled
swEnabled = self.getLeftLimitSwitchEnabled(motor)
if swEnabled == False:
self.setLeftLimitSwitchEnable(True, motor)
# Setup ref search parameters
cmd = 'SAP' # Set axis parameter
type = 193 # Ref search mode
value = 1 # Search left limit switch only
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
speed = self.getTargetSpeed(motor) / 2
cmd = 'SAP' # Set axis parameter
type = 194 # Ref search speed
value = speed # Half of the normal running speed (halved above)
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
cmd = 'SAP' # Set axis parameter
type = 195 # Ref switch calibration speed
value = speed / 2 # Quarter of the normal running speed
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
cmd = 'RFS' # Reference search
type = 0 # Start ref search
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
def stopReferenceSearch(self, motor=0):
cmd = 'RFS' # Reference search
type = 1 # Stop ref search
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
def getReferenceSearchStatus(self, motor=0):
cmd = 'RFS' # Reference search
type = 2 # Ref search status
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.value
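# Usage sketch (hedged) for the three RFS helpers above, assuming the
# status value drops to 0 once the reference search has finished:
#   tc.startReferenceSearch(motor=0)
#   while tc.getReferenceSearchStatus(motor=0) != 0:
#       time.sleep(0.1)
#   tc.setZeroPosition(motor=0)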
def getActualMotorLoad(self, motor):
cmd = 'GAP' # Get axis parameter
type = 206 # Actual load value
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.value
def getDriverErrorFlags(self, motor):
cmd = 'GAP' # Get axis parameter
type = 208 # TMC206 driver error flags
value = 0 # don't care
self.sendCommand(cmd, type, motor, value)
data = self.receiveData()
if data.status != 100:
if self.errorDict.has_key(data.status):
raise MotorError(self.errorDict[data.status])
elif data.status == None:
raise MotorError('Incorrect controller response, trying to reconnect')
else:
raise MotorError(''.join(('Unknown error, ', str(data.status))))
return data.value
if __name__ == '__main__':
tc = Trinamic_control6110()
# tc.connectTCP('130.235.95.232', 4001)
tc.connectTCP('10.68.150.63', 4001)
print "getRightLimitSwitch",tc.getRightLimitSwitch()
print "getLeftLimitSwitch",tc.getLeftLimitSwitch()
tc.reconnect()
| 42.539387
| 110
| 0.544482
| 4,100
| 38,881
| 5.145366
| 0.090976
| 0.114714
| 0.027873
| 0.034841
| 0.764221
| 0.756257
| 0.741989
| 0.734642
| 0.711083
| 0.672308
| 0
| 0.024075
| 0.353669
| 38,881
| 913
| 111
| 42.58598
| 0.8154
| 0.094108
| 0
| 0.765664
| 0
| 0
| 0.105745
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.002506
| 0.005013
| null | null | 0.010025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
314fbec2c69fbcaabb9d6c5aac13b506e3dd7a63
| 8,949
|
py
|
Python
|
load_data.py
|
HLBilove/makt
|
c632ef6a9c85da069ad56c6de8e5f8906ec6d0d9
|
[
"MIT"
] | null | null | null |
load_data.py
|
HLBilove/makt
|
c632ef6a9c85da069ad56c6de8e5f8906ec6d0d9
|
[
"MIT"
] | null | null | null |
load_data.py
|
HLBilove/makt
|
c632ef6a9c85da069ad56c6de8e5f8906ec6d0d9
|
[
"MIT"
] | null | null | null |
# Code reused from https://github.com/arghosh/AKT
import numpy as np
import math
class DATA(object):
def __init__(self, n_skill, seqlen, separate_char, name="data"):
self.separate_char = separate_char
self.seqlen = seqlen
self.n_skill = n_skill
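# Assumed input layout (inferred from the parsing below): each learner
# record spans six consecutive lines --
#   line 0: learner id
#   line 1: skill ids          (separated by separate_char)
#   line 2: exercise ids
#   line 3: IF values
#   line 4: responses
#   line 5: answer pairs "a1 a2"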
def load_data(self, path):
file_data = open(path, 'r')
s_data = []
sr_data = []
sa1_data = []
sa2_data = []
e_data = []
if_data = []
for lineID, line in enumerate(file_data):
line = line.strip()
# lineID starts from 0
if lineID % 6 == 0:
learner_id = lineID // 6 # one record every six lines (unused below)
if lineID % 6 == 1:
S = line.split(self.separate_char)
if len(S[len(S)-1]) == 0:
S = S[:-1]
if lineID % 6 == 2:
E = line.split(self.separate_char)
if len(E[len(E)-1]) == 0:
E = E[:-1]
if lineID % 6 == 3:
IF = line.split(self.separate_char)
if len(IF[len(IF)-1]) == 0:
IF = IF[:-1]
if lineID % 6 == 4:
R = line.split(self.separate_char)
if len(R[len(R)-1]) == 0:
R = R[:-1]
if lineID % 6 == 5:
A = line.split(self.separate_char)
if len(A[len(A)-1]) == 0:
A = A[:-1]
A1 = []
A2 = []
for i in range(len(A)):
A1.append(A[i].split(' ')[0])
A2.append(A[i].split(' ')[1])
# split the data into sub-sequences of at most seqlen
n_split = 1
if len(S) > self.seqlen:
n_split = math.floor(len(S) / self.seqlen)
if len(S) % self.seqlen:
n_split = n_split + 1
for k in range(n_split):
s_seq = []
e_seq = []
if_seq = []
r_seq = []
a1_seq = []
a2_seq = []
if k == n_split - 1:
endINdex = len(R)
else:
endINdex = (k+1) * self.seqlen
for i in range(k * self.seqlen, endINdex):
if len(S[i]) > 0:
Xindex = int(S[i]) + round(float(R[i])) * self.n_skill
Yindex = int(S[i]) + round(float(A1[i])) * self.n_skill
Zindex = int(S[i]) + round(float(A2[i])) * self.n_skill
s_seq.append(int(S[i]))
e_seq.append(int(E[i]))
if_seq.append(int(IF[i]))
r_seq.append(Xindex)
a1_seq.append(Yindex)
a2_seq.append(Zindex)
else:
print(S[i])
s_data.append(s_seq)
sr_data.append(r_seq)
sa1_data.append(a1_seq)
sa2_data.append(a2_seq)
e_data.append(e_seq)
if_data.append(if_seq)
file_data.close()
### data: [[],[],[],...] -- each sub-sequence is capped at seqlen
# convert data into ndarrays for better speed during training
s_dataArray = np.zeros((len(s_data), self.seqlen))
for j in range(len(s_data)):
dat = s_data[j]
s_dataArray[j, :len(dat)] = dat
sr_dataArray = np.zeros((len(sr_data), self.seqlen))
for j in range(len(sr_data)):
dat = sr_data[j]
sr_dataArray[j, :len(dat)] = dat
sa1_dataArray = np.zeros((len(sa1_data), self.seqlen))
for j in range(len(sa1_data)):
dat = sa1_data[j]
sa1_dataArray[j, :len(dat)] = dat
sa2_dataArray = np.zeros((len(sa2_data), self.seqlen))
for j in range(len(sa2_data)):
dat = sa2_data[j]
sa2_dataArray[j, :len(dat)] = dat
e_dataArray = np.zeros((len(e_data), self.seqlen))
for j in range(len(e_data)):
dat = e_data[j]
e_dataArray[j, :len(dat)] = dat
if_dataArray = np.zeros((len(if_data), self.seqlen))
for j in range(len(if_data)):
dat = if_data[j]
if_dataArray[j, :len(dat)] = dat
return s_dataArray, sr_dataArray, sa1_dataArray, sa2_dataArray, e_dataArray, if_dataArray
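# Note: every returned array has shape (num_sub_sequences, seqlen) and is
# zero-padded on the right for sub-sequences shorter than seqlen.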
def load_test_data(self, path):
file_data = open(path, 'r')
s_data = []
sr_data = []
sa1_data = []
sa2_data = []
e_data = []
if_data = []
test_e_num = 0
for lineID, line in enumerate(file_data):
line = line.strip()
# lineID starts from 0
if lineID % 6 == 0:
learner_id = lineID // 6 # one record every six lines (unused below)
if lineID % 6 == 1:
S = line.split(self.separate_char)
if len(S[len(S)-1]) == 0:
S = S[:-1]
test_e_num += len(S)
if lineID % 6 == 2:
E = line.split(self.separate_char)
if len(E[len(E)-1]) == 0:
E = E[:-1]
if lineID % 6 == 3:
IF = line.split(self.separate_char)
if len(IF[len(IF)-1]) == 0:
IF = IF[:-1]
if lineID % 6 == 4:
R = line.split(self.separate_char)
if len(R[len(R)-1]) == 0:
R = R[:-1]
if lineID % 6 == 5:
A = line.split(self.separate_char)
if len(A[len(A)-1]) == 0:
A = A[:-1]
A1 = []
A2 = []
for i in range(len(A)):
A1.append(A[i].split(' ')[0])
A2.append(A[i].split(' ')[1])
# split the data into sub-sequences of at most seqlen
n_split = 1
if len(S) > self.seqlen:
n_split = math.floor(len(S) / self.seqlen)
if len(S) % self.seqlen:
n_split = n_split + 1
for k in range(n_split):
s_seq = []
e_seq = []
if_seq = []
r_seq = []
a1_seq = []
a2_seq = []
if k == n_split - 1:
endINdex = len(R)
else:
endINdex = (k+1) * self.seqlen
for i in range(k * self.seqlen, endINdex):
if len(S[i]) > 0:
Xindex = int(S[i]) + round(float(R[i])) * self.n_skill
Yindex = int(S[i]) + round(float(A1[i])) * self.n_skill
Zindex = int(S[i]) + round(float(A2[i])) * self.n_skill
s_seq.append(int(S[i]))
e_seq.append(int(E[i]))
if_seq.append(int(IF[i]))
r_seq.append(Xindex)
a1_seq.append(Yindex)
a2_seq.append(Zindex)
else:
print(S[i])
s_data.append(s_seq)
sr_data.append(r_seq)
sa1_data.append(a1_seq)
sa2_data.append(a2_seq)
e_data.append(e_seq)
if_data.append(if_seq)
file_data.close()
### data: [[],[],[],...] -- each sub-sequence is capped at seqlen
# convert data into ndarrays for better speed during training
s_dataArray = np.zeros((len(s_data), self.seqlen))
for j in range(len(s_data)):
dat = s_data[j]
s_dataArray[j, :len(dat)] = dat
sr_dataArray = np.zeros((len(sr_data), self.seqlen))
for j in range(len(sr_data)):
dat = sr_data[j]
sr_dataArray[j, :len(dat)] = dat
sa1_dataArray = np.zeros((len(sa1_data), self.seqlen))
for j in range(len(sa1_data)):
dat = sa1_data[j]
sa1_dataArray[j, :len(dat)] = dat
sa2_dataArray = np.zeros((len(sa2_data), self.seqlen))
for j in range(len(sa2_data)):
dat = sa2_data[j]
sa2_dataArray[j, :len(dat)] = dat
e_dataArray = np.zeros((len(e_data), self.seqlen))
for j in range(len(e_data)):
dat = e_data[j]
e_dataArray[j, :len(dat)] = dat
if_dataArray = np.zeros((len(if_data), self.seqlen))
for j in range(len(if_data)):
dat = if_data[j]
if_dataArray[j, :len(dat)] = dat
return s_dataArray, sr_dataArray, sa1_dataArray, sa2_dataArray, e_dataArray, if_dataArray, test_e_num
| 38.24359
| 109
| 0.423176
| 1,099
| 8,949
| 3.272066
| 0.084622
| 0.06396
| 0.038932
| 0.063404
| 0.936596
| 0.936596
| 0.936596
| 0.936596
| 0.936596
| 0.936596
| 0
| 0.0263
| 0.451894
| 8,949
| 234
| 109
| 38.24359
| 0.70683
| 0.038775
| 0
| 0.936585
| 0
| 0
| 0.001164
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014634
| false
| 0
| 0.009756
| 0
| 0.039024
| 0.009756
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
319795a390be23e2096fa172d10cfbf096806b71
| 8,890
|
py
|
Python
|
text_net.py
|
muziyongshixin/adv_cross_modal_hashing
|
cf0f9f33a3dad763ab6dd4232a00a3b0b93c64a1
|
[
"MIT"
] | 2
|
2021-03-16T10:45:10.000Z
|
2021-05-16T12:31:01.000Z
|
text_net.py
|
muziyongshixin/adv_cross_modal_hashing
|
cf0f9f33a3dad763ab6dd4232a00a3b0b93c64a1
|
[
"MIT"
] | 2
|
2021-03-31T08:21:20.000Z
|
2021-06-22T16:08:34.000Z
|
text_net.py
|
muziyongshixin/adv_cross_modal_hashing
|
cf0f9f33a3dad763ab6dd4232a00a3b0b93c64a1
|
[
"MIT"
] | null | null | null |
import random
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torch.nn import Parameter
import torchvision as tv
import tokenization
from bert import BertConfig, BertModel
import bert
def freeze_layers(model):
for child in model.children():
for param in child.parameters():
param.requires_grad = False
def transfer_ckpt(ori_data):
import collections
result = collections.OrderedDict()
for k in ori_data:
new_k = k.replace('bert.', '')
new_k = new_k.replace('LayerNorm.weight', 'LayerNorm.gamma')
new_k = new_k.replace('LayerNorm.bias', 'LayerNorm.beta')
result[new_k] = ori_data[k]
return result
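# Example of what transfer_ckpt() does: it adapts HuggingFace-style
# checkpoint keys to this repo's BERT implementation, e.g.
#   'bert.encoder.layer.0.output.LayerNorm.weight'
#     -> 'encoder.layer.0.output.LayerNorm.gamma'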
class BertMapping(nn.Module):
"""
"""
def __init__(self, opt):
super(BertMapping, self).__init__()
bert_config = BertConfig.from_json_file(opt.bert_config_file)
self.bert = BertModel(bert_config)
ori_ckpt = torch.load(opt.init_checkpoint, map_location='cpu')
transed_ckpt = transfer_ckpt(ori_ckpt)
self.bert.load_state_dict(transed_ckpt, strict=False)
freeze_layers(self.bert)
self.txt_stru = opt.txt_stru
if opt.txt_stru == 'pooling':
self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)
self.mapping = nn.Linear(bert_config.hidden_size, opt.final_dims)
elif opt.txt_stru == 'cnn':
Ks = [1, 2, 3]
in_channel = 1
out_channel = 512
embedding_dim = bert_config.hidden_size
self.convs1 = nn.ModuleList([nn.Conv2d(in_channel, out_channel, (K, embedding_dim)) for K in Ks])
self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)
self.mapping = nn.Linear(len(Ks) * out_channel, opt.final_dims)
elif opt.txt_stru == 'rnn':
embedding_dim = bert_config.hidden_size
self.bi_gru = opt.bi_gru
self.rnn = nn.GRU(embedding_dim, opt.embed_size, opt.num_layers, batch_first=True, bidirectional=opt.bi_gru)
self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)
self.mapping = nn.Linear(opt.embed_size, opt.final_dims)
elif opt.txt_stru == 'trans':
bert_config = BertConfig.from_json_file(opt.img_trans_cfg)
self.layer = bert.BERTLayer(bert_config)
self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)
self.mapping = nn.Linear(768, opt.final_dims)
def forward(self, input_ids, attention_mask, token_type_ids, lengths):
# print('bert input',input_ids.shape)
all_encoder_layers, pooled_output = self.bert(input_ids, token_type_ids=token_type_ids,attention_mask=attention_mask)
if self.txt_stru == 'pooling':
output = self.mapping(all_encoder_layers[-1])
output = torch.mean(output, 1)
code = output
elif self.txt_stru == 'cnn':
x = all_encoder_layers[-1].unsqueeze(1) # (batch_size, 1, token_num, embedding_dim)
x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] # [(batch_size, out_channel, W), ...]*len(Ks)
x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] # [(N, Co), ...]*len(Ks)
output = torch.cat(x, 1)
elif self.txt_stru == 'rnn':
x = all_encoder_layers[-1] # (batch_size, token_num, embedding_dim)
packed = pack_padded_sequence(x, lengths, batch_first=True)
# Forward propagate RNN
out, _ = self.rnn(packed)
# Reshape *final* output to (batch_size, hidden_size)
padded = pad_packed_sequence(out, batch_first=True)
cap_emb, cap_len = padded
if self.bi_gru:
# use integer division -- float slice indices raise a TypeError
cap_emb = (cap_emb[:, :, :cap_emb.size(2) // 2] + cap_emb[:, :, cap_emb.size(2) // 2:]) / 2
output = torch.mean(cap_emb, 1)
elif self.txt_stru == 'trans':
hidden_states = self.mapping(all_encoder_layers[-1])
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.float()
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
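# standard additive masking: padded positions (mask == 0) receive a large
# negative bias, so softmax assigns them roughly zero attention weight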
hidden_states = self.layer(hidden_states, extended_attention_mask)
# output = hidden_states[:, 0, :]
output = torch.mean(hidden_states, 1)
output = self.dropout(output)
code = self.mapping(output)
# code = F.tanh(code)
code = F.normalize(code, p=2, dim=1)
return code
class BertBinaryMapping(nn.Module):
"""
"""
def __init__(self, opt):
super(BertBinaryMapping, self).__init__()
bert_config = BertConfig.from_json_file(opt.bert_config_file)
self.bert = BertModel(bert_config)
ori_ckpt = torch.load(opt.init_checkpoint, map_location='cpu')
transed_ckpt = transfer_ckpt(ori_ckpt)
self.bert.load_state_dict(transed_ckpt, strict=False)
freeze_layers(self.bert)
self.txt_stru = opt.txt_stru
if opt.txt_stru == 'pooling':
self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)
self.mapping = nn.Linear(bert_config.hidden_size, opt.final_dims)
elif opt.txt_stru == 'cnn':
Ks = [1, 2, 3]
in_channel = 1
out_channel = 512
embedding_dim = bert_config.hidden_size
self.convs1 = nn.ModuleList([nn.Conv2d(in_channel, out_channel, (K, embedding_dim)) for K in Ks])
self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)
self.mapping = nn.Linear(len(Ks) * out_channel, opt.final_dims)
elif opt.txt_stru == 'rnn':
embedding_dim = bert_config.hidden_size
self.bi_gru = opt.bi_gru
self.rnn = nn.GRU(embedding_dim, opt.embed_size, opt.num_layers, batch_first=True, bidirectional=opt.bi_gru)
self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)
self.mapping = nn.Linear(opt.embed_size, opt.final_dims)
elif opt.txt_stru == 'trans':
bert_config = BertConfig.from_json_file(opt.img_trans_cfg)
self.layer = bert.BERTLayer(bert_config)
self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)
self.mapping = nn.Linear(768, opt.final_dims)
def forward(self, input_ids, attention_mask, token_type_ids, lengths):
# print('bert input',input_ids.shape)
all_encoder_layers, pooled_output = self.bert(input_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask)
if self.txt_stru == 'pooling':
output = self.mapping(all_encoder_layers[-1])
output = torch.mean(output, 1)
code = output
elif self.txt_stru == 'cnn':
x = all_encoder_layers[-1].unsqueeze(1) # (batch_size, 1, token_num, embedding_dim)
x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] # [(batch_size, out_channel, W), ...]*len(Ks)
x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] # [(N, Co), ...]*len(Ks)
output = torch.cat(x, 1)
elif self.txt_stru == 'rnn':
x = all_encoder_layers[-1] # (batch_size, token_num, embedding_dim)
packed = pack_padded_sequence(x, lengths, batch_first=True)
# Forward propagate RNN
out, _ = self.rnn(packed)
# Reshape *final* output to (batch_size, hidden_size)
padded = pad_packed_sequence(out, batch_first=True)
cap_emb, cap_len = padded
if self.bi_gru:
# use integer division -- float slice indices raise a TypeError
cap_emb = (cap_emb[:, :, :cap_emb.size(2) // 2] + cap_emb[:, :, cap_emb.size(2) // 2:]) / 2
output = torch.mean(cap_emb, 1)
elif self.txt_stru == 'trans':
hidden_states = self.mapping(all_encoder_layers[-1])
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.float()
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
hidden_states = self.layer(hidden_states, extended_attention_mask)
# output = hidden_states[:, 0, :]
output = torch.mean(hidden_states, 1)
output = self.dropout(output)
code = self.mapping(output) # batch * dim
code=torch.softmax(code,dim=-1)
median,m_idx=torch.median(code,dim=-1)
code= code - (median.unsqueeze(1)+1e-8)
code = torch.tanh(code*10)
# code = F.normalize(code, p=2, dim=1)
return code
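# Note on the binary head above: it centres the softmax output on its
# per-sample median and squashes with tanh(10*x), pushing roughly half of
# the dimensions toward +1 and half toward -1 (a soft binarisation).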
| 45.357143
| 125
| 0.623622
| 1,182
| 8,890
| 4.42555
| 0.139594
| 0.04588
| 0.042822
| 0.030587
| 0.861021
| 0.861021
| 0.851845
| 0.841522
| 0.841522
| 0.841522
| 0
| 0.014825
| 0.264005
| 8,890
| 195
| 126
| 45.589744
| 0.784655
| 0.072891
| 0
| 0.7625
| 0
| 0
| 0.017319
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0375
| false
| 0
| 0.0875
| 0
| 0.15625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9eee7f3137ab32ca6598ac920849a5b0b9f78d9f
| 5,426
|
py
|
Python
|
src/media_server/migrations/0001_initial.py
|
nefarius/portfolio-backend
|
f595041354eedee71a4aa5b761501be030b81d09
|
[
"Apache-2.0"
] | 6
|
2019-06-19T12:56:42.000Z
|
2021-12-26T07:22:47.000Z
|
src/media_server/migrations/0001_initial.py
|
nefarius/portfolio-backend
|
f595041354eedee71a4aa5b761501be030b81d09
|
[
"Apache-2.0"
] | 13
|
2019-12-20T10:39:44.000Z
|
2022-02-10T09:11:09.000Z
|
src/media_server/migrations/0001_initial.py
|
nefarius/portfolio-backend
|
f595041354eedee71a4aa5b761501be030b81d09
|
[
"Apache-2.0"
] | 1
|
2021-12-01T12:03:29.000Z
|
2021-12-01T12:03:29.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-10 14:44
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import general.models
import media_server.models
import media_server.storages
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Audio',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('parent_id', models.CharField(max_length=22)),
('status', models.IntegerField(choices=[(0, 'not converted'), (1, 'in progress'), (2, 'converted'), (3, 'error')], default=0)),
('mime_type', models.CharField(blank=True, default='', max_length=255)),
('file', models.FileField(storage=media_server.storages.ProtectedFileSystemStorage(), upload_to=media_server.models.user_directory_path)),
('id', general.models.ShortUUIDField(prefix='a', primary_key=True, serialize=False)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Document',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('parent_id', models.CharField(max_length=22)),
('status', models.IntegerField(choices=[(0, 'not converted'), (1, 'in progress'), (2, 'converted'), (3, 'error')], default=0)),
('mime_type', models.CharField(blank=True, default='', max_length=255)),
('file', models.FileField(storage=media_server.storages.ProtectedFileSystemStorage(), upload_to=media_server.models.user_directory_path)),
('id', general.models.ShortUUIDField(prefix='d', primary_key=True, serialize=False)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Image',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('parent_id', models.CharField(max_length=22)),
('status', models.IntegerField(choices=[(0, 'not converted'), (1, 'in progress'), (2, 'converted'), (3, 'error')], default=0)),
('mime_type', models.CharField(blank=True, default='', max_length=255)),
('id', general.models.ShortUUIDField(prefix='i', primary_key=True, serialize=False)),
('file', models.ImageField(storage=media_server.storages.ProtectedFileSystemStorage(), upload_to=media_server.models.user_directory_path)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Other',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('parent_id', models.CharField(max_length=22)),
('status', models.IntegerField(choices=[(0, 'not converted'), (1, 'in progress'), (2, 'converted'), (3, 'error')], default=0)),
('mime_type', models.CharField(blank=True, default='', max_length=255)),
('file', models.FileField(storage=media_server.storages.ProtectedFileSystemStorage(), upload_to=media_server.models.user_directory_path)),
('id', general.models.ShortUUIDField(prefix='x', primary_key=True, serialize=False)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Video',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('parent_id', models.CharField(max_length=22)),
('status', models.IntegerField(choices=[(0, 'not converted'), (1, 'in progress'), (2, 'converted'), (3, 'error')], default=0)),
('mime_type', models.CharField(blank=True, default='', max_length=255)),
('file', models.FileField(storage=media_server.storages.ProtectedFileSystemStorage(), upload_to=media_server.models.user_directory_path)),
('id', general.models.ShortUUIDField(prefix='v', primary_key=True, serialize=False)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
| 52.679612
| 155
| 0.594176
| 542
| 5,426
| 5.782288
| 0.190037
| 0.042119
| 0.073389
| 0.082961
| 0.863433
| 0.843331
| 0.843331
| 0.843331
| 0.843331
| 0.843331
| 0
| 0.016836
| 0.255621
| 5,426
| 102
| 156
| 53.196078
| 0.759099
| 0.012717
| 0
| 0.680851
| 1
| 0
| 0.095816
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.074468
| 0
| 0.117021
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9ef60e5b2ae8fb9303d369515e1e61881fd5a6e1
| 7,192
|
py
|
Python
|
userbot/modules/shazam_helper/user_agent.py
|
im-bb/CyberUserBot
|
945c2d6a4c05b11592611b2451a7cf15a40c3530
|
[
"MIT"
] | 2
|
2021-09-24T06:19:40.000Z
|
2021-09-30T12:24:56.000Z
|
userbot/modules/shazam_helper/user_agent.py
|
im-bb/CyberUserBot
|
945c2d6a4c05b11592611b2451a7cf15a40c3530
|
[
"MIT"
] | null | null | null |
userbot/modules/shazam_helper/user_agent.py
|
im-bb/CyberUserBot
|
945c2d6a4c05b11592611b2451a7cf15a40c3530
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
#-*- encoding: Utf-8 -*-
# From https://github.com/SaswatPadhi/FlashProfileDemo/blob/c1e3f05d09f6443568a606dc0a439d6ebb057ae1/tests/hetero/user_agents.json
USER_AGENTS = [
"Dalvik/2.1.0 (Linux; U; Android 5.0.2; VS980 4G Build/LRX22G)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; SM-T210 Build/KOT49H)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; SM-P905V Build/LMY47X)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.4; Vodafone Smart Tab 4G Build/KTU84P)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.4; SM-G360H Build/KTU84P)",
"Dalvik/2.1.0 (Linux; U; Android 5.0.2; SM-S920L Build/LRX22G)",
"Dalvik/2.1.0 (Linux; U; Android 5.0; Fire Pro Build/LRX21M)",
"Dalvik/2.1.0 (Linux; U; Android 5.0; SM-N9005 Build/LRX21V)",
"Dalvik/2.1.0 (Linux; U; Android 6.0.1; SM-G920F Build/MMB29K)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; SM-G7102 Build/KOT49H)",
"Dalvik/2.1.0 (Linux; U; Android 5.0; SM-G900F Build/LRX21T)",
"Dalvik/2.1.0 (Linux; U; Android 6.0.1; SM-G928F Build/MMB29K)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; SM-J500FN Build/LMY48B)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; Coolpad 3320A Build/LMY47V)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.4; SM-J110F Build/KTU84P)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; SAMSUNG-SGH-I747 Build/KOT49H)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; SAMSUNG-SM-T337A Build/KOT49H)",
"Dalvik/1.6.0 (Linux; U; Android 4.3; SGH-T999 Build/JSS15J)",
"Dalvik/2.1.0 (Linux; U; Android 6.0.1; D6603 Build/23.5.A.0.570)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; SM-J700H Build/LMY48B)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; HTC6600LVW Build/KOT49H)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; SM-N910G Build/LMY47X)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; SM-N910T Build/LMY47X)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.4; C6903 Build/14.4.A.0.157)",
"Dalvik/2.1.0 (Linux; U; Android 6.0.1; SM-G920F Build/MMB29K)",
"Dalvik/1.6.0 (Linux; U; Android 4.2.2; GT-I9105P Build/JDQ39)",
"Dalvik/2.1.0 (Linux; U; Android 5.0; SM-G900F Build/LRX21T)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; GT-I9192 Build/KOT49H)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; SM-G531H Build/LMY48B)",
"Dalvik/2.1.0 (Linux; U; Android 5.0; SM-N9005 Build/LRX21V)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; LGMS345 Build/LMY47V)",
"Dalvik/2.1.0 (Linux; U; Android 5.0.2; HTC One Build/LRX22G)",
"Dalvik/2.1.0 (Linux; U; Android 5.0.2; LG-D800 Build/LRX22G)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; SM-G531H Build/LMY48B)",
"Dalvik/2.1.0 (Linux; U; Android 5.0; SM-N9005 Build/LRX21V)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.4; SM-T113 Build/KTU84P)",
"Dalvik/1.6.0 (Linux; U; Android 4.2.2; AndyWin Build/JDQ39E)",
"Dalvik/2.1.0 (Linux; U; Android 5.0; Lenovo A7000-a Build/LRX21M)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; LGL16C Build/KOT49I.L16CV11a)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; GT-I9500 Build/KOT49H)",
"Dalvik/2.1.0 (Linux; U; Android 5.0.2; SM-A700FD Build/LRX22G)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; SM-G130HN Build/KOT49H)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; SM-N9005 Build/KOT49H)",
"Dalvik/1.6.0 (Linux; U; Android 4.1.2; LG-E975T Build/JZO54K)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; E1 Build/KOT49H)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; GT-I9500 Build/KOT49H)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; GT-N5100 Build/KOT49H)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; SM-A310F Build/LMY47X)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; SM-J105H Build/LMY47V)",
"Dalvik/1.6.0 (Linux; U; Android 4.3; GT-I9305T Build/JSS15J)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; android Build/JDQ39)",
"Dalvik/1.6.0 (Linux; U; Android 4.2.1; HS-U970 Build/JOP40D)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.4; SM-T561 Build/KTU84P)",
"Dalvik/1.6.0 (Linux; U; Android 4.2.2; GT-P3110 Build/JDQ39)",
"Dalvik/2.1.0 (Linux; U; Android 6.0.1; SM-G925T Build/MMB29K)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; HUAWEI Y221-U22 Build/HUAWEIY221-U22)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; SM-G530T1 Build/LMY47X)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; SM-G920I Build/LMY47X)",
"Dalvik/2.1.0 (Linux; U; Android 5.0; SM-G900F Build/LRX21T)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; Vodafone Smart ultra 6 Build/LMY47V)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.4; XT1080 Build/SU6-7.7)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.4; ASUS MeMO Pad 7 Build/KTU84P)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; SM-G800F Build/KOT49H)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; GT-N7100 Build/KOT49H)",
"Dalvik/2.1.0 (Linux; U; Android 6.0.1; SM-G925I Build/MMB29K)",
"Dalvik/2.1.0 (Linux; U; Android 6.0.1; A0001 Build/MMB29X)",
"Dalvik/2.1.0 (Linux; U; Android 5.1; XT1045 Build/LPB23.13-61)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; LGMS330 Build/LMY47V)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.4; Z970 Build/KTU84P)",
"Dalvik/2.1.0 (Linux; U; Android 5.0; SM-N900P Build/LRX21V)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; T1-701u Build/HuaweiMediaPad)",
"Dalvik/2.1.0 (Linux; U; Android 5.1; HTCD100LVWPP Build/LMY47O)",
"Dalvik/2.1.0 (Linux; U; Android 6.0.1; SM-G935R4 Build/MMB29M)",
"Dalvik/2.1.0 (Linux; U; Android 6.0.1; SM-G930V Build/MMB29M)",
"Dalvik/2.1.0 (Linux; U; Android 5.0.2; ZTE Blade Q Lux Build/LRX22G)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.4; GT-I9060I Build/KTU84P)",
"Dalvik/2.1.0 (Linux; U; Android 6.0.1; LGUS992 Build/MMB29M)",
"Dalvik/2.1.0 (Linux; U; Android 6.0.1; SM-G900P Build/MMB29M)",
"Dalvik/1.6.0 (Linux; U; Android 4.1.2; SGH-T999L Build/JZO54K)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; SM-N910V Build/LMY47X)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; GT-I9500 Build/KOT49H)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; SM-P601 Build/LMY47X)",
"Dalvik/1.6.0 (Linux; U; Android 4.2.2; GT-S7272 Build/JDQ39)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; SM-N910T Build/LMY47X)",
"Dalvik/1.6.0 (Linux; U; Android 4.3; SAMSUNG-SGH-I747 Build/JSS15J)",
"Dalvik/2.1.0 (Linux; U; Android 5.0.2; ZTE Blade Q Lux Build/LRX22G)",
"Dalvik/2.1.0 (Linux; U; Android 6.0.1; SM-G930F Build/MMB29K)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; HTC_PO582 Build/KOT49H)",
"Dalvik/2.1.0 (Linux; U; Android 6.0; HUAWEI MT7-TL10 Build/HuaweiMT7-TL10)",
"Dalvik/2.1.0 (Linux; U; Android 6.0; LG-H811 Build/MRA58K)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; SM-N7505 Build/KOT49H)",
"Dalvik/2.1.0 (Linux; U; Android 6.0; LG-H815 Build/MRA58K)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; LenovoA3300-HV Build/KOT49H)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.4; SM-G360G Build/KTU84P)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.4; GT-I9300I Build/KTU84P)",
"Dalvik/2.1.0 (Linux; U; Android 5.0; SM-G900F Build/LRX21T)",
"Dalvik/2.1.0 (Linux; U; Android 6.0.1; SM-J700T Build/MMB29K)",
"Dalvik/2.1.0 (Linux; U; Android 5.1.1; SM-J500FN Build/LMY48B)",
"Dalvik/1.6.0 (Linux; U; Android 4.2.2; SM-T217S Build/JDQ39)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.4; SAMSUNG-SM-N900A Build/KTU84P)"
]
| 67.214953
| 130
| 0.632925
| 1,420
| 7,192
| 3.203521
| 0.121831
| 0.131897
| 0.15388
| 0.30776
| 0.792262
| 0.792262
| 0.792262
| 0.792262
| 0.792262
| 0.761266
| 0
| 0.187552
| 0.157814
| 7,192
| 107
| 131
| 67.214953
| 0.56348
| 0.023498
| 0
| 0.196078
| 0
| 0.980392
| 0.882923
| 0.012107
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
730296f4cb9a7c207c073118e52310cf411c9138
| 2,777
|
py
|
Python
|
tests/unit/test_config_tree.py
|
chadell/yangify
|
b35316cde7e2e5e166db593d0a9cc6b448bb9047
|
[
"Apache-2.0"
] | 109
|
2019-05-06T14:02:40.000Z
|
2022-03-13T02:47:44.000Z
|
tests/unit/test_config_tree.py
|
chadell/yangify
|
b35316cde7e2e5e166db593d0a9cc6b448bb9047
|
[
"Apache-2.0"
] | 24
|
2019-05-06T13:47:12.000Z
|
2020-12-11T11:06:08.000Z
|
tests/unit/test_config_tree.py
|
chadell/yangify
|
b35316cde7e2e5e166db593d0a9cc6b448bb9047
|
[
"Apache-2.0"
] | 29
|
2019-05-06T17:41:53.000Z
|
2021-08-17T01:02:30.000Z
|
from yangify.translator.config_tree import ConfigTree
expected_simple = """interface Gi1
description "A description for Gi1"
shutdown
exit
!
interface Gi2
description "A description for Gi2"
exit
!
logging something something
logging something else
"""
expected_double_nested = """interface Gi1
description "A description for Gi1"
shutdown
another nest
more subsubcommands
exit
!
interface Gi2
description "A description for Gi2"
exit
!
logging something something
logging something else
"""
class Test:
def test_simple(self) -> None:
config = ConfigTree()
gi1 = config.new_section("interface Gi1")
gi1.add_command(' description "A description for Gi1"')
gi1.add_command(" shutdown")
gi1.add_command(" exit")
gi1.add_command("!")
gi2 = config.new_section("interface Gi2")
gi2.add_command(' description "A description for Gi2"')
gi2.add_command(" exit")
gi2.add_command("!")
config.add_command("logging something something")
config.add_command("logging something else")
assert config.to_string() == expected_simple
def test_simple_pop(self) -> None:
config = ConfigTree()
gi1 = config.new_section("interface Gi1")
gi1.add_command(' description "A description for Gi1"')
gi1.add_command(" shutdown")
gi1.add_command(" exit")
gi1.add_command("!")
gi2 = config.new_section("interface Gi2")
gi2.add_command(' description "A description for Gi2"')
gi2.add_command(" exit")
gi2.add_command("!")
gi3 = config.new_section("interface Gi3")
gi3.add_command(' description "A description for Gi3"')
gi3.add_command(" exit")
gi3.add_command("!")
config.pop_section("interface Gi3")
config.add_command("logging something something")
config.add_command("logging something else")
assert config.to_string() == expected_simple
def test_double_nest(self) -> None:
config = ConfigTree()
gi1 = config.new_section("interface Gi1")
gi1.add_command(' description "A description for Gi1"')
gi1.add_command(" shutdown")
nest = gi1.new_section(" another nest")
nest.add_command(" more subsubcommands")
gi1.add_command(" exit")
gi1.add_command("!")
gi2 = config.new_section("interface Gi2")
gi2.add_command(' description "A description for Gi2"')
gi2.add_command(" exit")
gi2.add_command("!")
config.add_command("logging something something")
config.add_command("logging something else")
assert config.to_string() == expected_double_nested
| 33.059524
| 65
| 0.64458
| 317
| 2,777
| 5.470032
| 0.126183
| 0.178777
| 0.089965
| 0.164937
| 0.816609
| 0.816609
| 0.795848
| 0.795848
| 0.739331
| 0.739331
| 0
| 0.026354
| 0.24847
| 2,777
| 83
| 66
| 33.457831
| 0.804504
| 0
| 0
| 0.727273
| 0
| 0
| 0.383867
| 0
| 0
| 0
| 0
| 0
| 0.038961
| 1
| 0.038961
| false
| 0
| 0.012987
| 0
| 0.064935
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
731901a3a5cf639f044c0e57bb9eb2d3a0a94e15
| 313
|
py
|
Python
|
video/constants.py
|
sharmapacific/Youtube_latest_video
|
8d4802abf5b3fcfbb5be0f58a5f0ea1bd3954286
|
[
"MIT"
] | null | null | null |
video/constants.py
|
sharmapacific/Youtube_latest_video
|
8d4802abf5b3fcfbb5be0f58a5f0ea1bd3954286
|
[
"MIT"
] | 12
|
2021-03-19T09:33:08.000Z
|
2022-03-12T00:21:03.000Z
|
video/constants.py
|
sharmapacific/Youtube_latest_video
|
8d4802abf5b3fcfbb5be0f58a5f0ea1bd3954286
|
[
"MIT"
] | 1
|
2020-03-29T12:15:15.000Z
|
2020-03-29T12:15:15.000Z
|
# LATEST_VIDEO = 'https://www.googleapis.com/youtube/v3/search?type=video&order=date&part=snippet&maxResults=10&publishedAfter=2020-03-28T00:00:00Z&key={}' # noqa
LATEST_VIDEO = 'https://www.googleapis.com/youtube/v3/search?type=video&order=date&part=snippet&publishedAfter=2020-03-28T00:00:00Z&key={}' # noqa
| 78.25
| 163
| 0.766773
| 48
| 313
| 4.958333
| 0.5
| 0.092437
| 0.134454
| 0.159664
| 0.94958
| 0.94958
| 0.94958
| 0.94958
| 0.638655
| 0.638655
| 0
| 0.107023
| 0.044728
| 313
| 3
| 164
| 104.333333
| 0.688963
| 0.527157
| 0
| 0
| 0
| 1
| 0.847222
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
7343cc6e715593c4fadbc6ff7d7d4861a7fd7dc1
| 28,569
|
py
|
Python
|
tests/test_mongoengine.py
|
quaxsze/flask-file-system
|
5ab2cb5c4b5f2b91b53153574d035a924eb6d74c
|
[
"MIT"
] | null | null | null |
tests/test_mongoengine.py
|
quaxsze/flask-file-system
|
5ab2cb5c4b5f2b91b53153574d035a924eb6d74c
|
[
"MIT"
] | null | null | null |
tests/test_mongoengine.py
|
quaxsze/flask-file-system
|
5ab2cb5c4b5f2b91b53153574d035a924eb6d74c
|
[
"MIT"
] | null | null | null |
import filecmp
import os
from PIL import Image
import flask_file_system as fs
from flask_file_system.mongo import FileField, ImageField
from flask_mongoengine import MongoEngine
import pytest
db = MongoEngine()
class MongoEngineTestCase:
@pytest.fixture(autouse=True)
def storage(self, app, tmpdir):
app.instance_path = str(tmpdir)
storage = fs.Storage('test', fs.ALL)
fs.init_app(app, storage)
db.init_app(app)
yield storage
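# teardown: everything after the yield runs once the test has finished; drop the MongoDB test database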
with app.test_request_context():
db_name = app.config['MONGODB_DB']
try:
db.connection.client.drop_database(db_name)
except TypeError:
db.connection.drop_database(db_name)
class FileFieldTest(MongoEngineTestCase):
def test_default_validate(self, storage):
class Tester(db.Document):
file = FileField(fs=storage)
tester = Tester()
assert tester.validate() is None
assert not tester.file
assert str(tester.file) == ''
assert tester.to_mongo() == {}
assert tester.file.filename is None
def test_set_filename(self, storage):
class Tester(db.Document):
file = FileField(fs=storage)
filename = 'file.test'
tester = Tester()
tester.file = filename
assert tester.validate() is None
assert tester.file
assert tester.file.filename == filename
assert tester.to_mongo() == {
'file': {
'filename': filename,
}
}
tester.save()
tester.reload()
assert tester.file.filename == filename
def test_save_from_file(self, storage, binfile):
class Tester(db.Document):
file = FileField(fs=storage)
filename = 'test.png'
tester = Tester()
f = open(binfile, 'rb')
tester.file.save(f, filename)
f.close()  # close the handle explicitly once save() has consumed it
tester.validate()
assert tester.file
assert str(tester.file) == tester.file.url
assert tester.file.filename == filename
assert tester.to_mongo() == {
'file': {
'filename': filename,
}
}
assert filename in storage
assert filecmp.cmp(storage.path(filename), binfile)
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.file.filename == filename
def test_save_from_filestorage(self, storage, utils):
class Tester(db.Document):
file = FileField(fs=storage)
filename = 'test.txt'
tester = Tester()
tester.file.save(utils.filestorage(filename, 'this is a stest'))
tester.validate()
assert tester.file
assert str(tester.file) == tester.file.url
assert tester.file.filename == filename
assert tester.to_mongo() == {
'file': {
'filename': filename,
}
}
assert filename in storage
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.file.filename == filename
def test_save_with_upload_to(self, storage, utils):
upload_to = 'prefix'
class Tester(db.Document):
file = FileField(fs=storage, upload_to=upload_to)
filename = 'test.txt'
tester = Tester()
tester.file.save(utils.filestorage(filename, 'this is a stest'))
tester.validate()
expected_filename = '/'.join([upload_to, filename])
assert tester.file
assert tester.file.filename == expected_filename
assert expected_filename in storage
assert tester.to_mongo() == {
'file': {
'filename': expected_filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.file.filename == expected_filename
def test_save_with_callable_upload_to(self, storage, utils):
upload_to = 'prefix'
class Tester(db.Document):
file = FileField(fs=storage, upload_to=lambda o: upload_to)
filename = 'test.txt'
tester = Tester()
tester.file.save(utils.filestorage(filename, 'this is a stest'))
tester.validate()
expected_filename = '/'.join([upload_to, filename])
assert tester.file
assert tester.file.filename == expected_filename
assert expected_filename in storage
assert tester.to_mongo() == {
'file': {
'filename': expected_filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.file.filename == expected_filename
def test_save_with_callable_basename(self, storage, utils):
class Tester(db.Document):
file = FileField(fs=storage, basename=lambda o: 'prefix/filename')
filename = 'test.txt'
tester = Tester()
tester.file.save(utils.filestorage(filename, 'this is a stest'))
tester.validate()
expected_filename = 'prefix/filename.txt'
assert tester.file
assert tester.file.filename == expected_filename
assert expected_filename in storage
assert tester.to_mongo() == {
'file': {
'filename': expected_filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.file.filename == expected_filename
def test_save_with_callable_basename_override(self, storage, utils):
class Tester(db.Document):
file = FileField(fs=storage, basename=lambda o: 'prefix/filename')
filename = 'test.txt'
expected_filename = 'other.txt'
tester = Tester()
tester.file.save(utils.filestorage(filename, 'this is a stest'), expected_filename)
tester.validate()
assert tester.file
assert tester.file.filename == expected_filename
assert expected_filename in storage
assert tester.to_mongo() == {
'file': {
'filename': expected_filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.file.filename == expected_filename
class ImageFieldTestMixin(MongoEngineTestCase):
@pytest.fixture
def resource(self, utils, image):
return utils.filestorage('flask.{0}'.format(self.ext), image)
def test_default_validate(self, storage):
class Tester(db.Document):
image = ImageField(fs=storage)
tester = Tester()
assert tester.validate() is None
assert not tester.image
assert str(tester.image) == ''
assert tester.to_mongo() == {}
assert tester.image.filename is None
assert tester.image.original is None
def test_save_file(self, storage, image):
class Tester(db.Document):
image = ImageField(fs=storage)
filename = 'test.{0}'.format(self.ext)
tester = Tester()
tester.image.save(image, filename)
tester.validate()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.original == filename
assert filename in storage
assert tester.to_mongo() == {
'image': {
'filename': filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == filename
with open(storage.path(filename), 'rb') as f_stored:
stored = Image.open(f_stored)
original = Image.open(image)
assert stored.size == original.size
def test_save_filestorage(self, storage, resource, image):
class Tester(db.Document):
image = ImageField(fs=storage)
filename = 'flask.{0}'.format(self.ext)
tester = Tester()
tester.image.save(resource)
tester.validate()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.original == filename
assert filename in storage
assert tester.to_mongo() == {
'image': {
'filename': filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == filename
with open(storage.path(filename), 'rb') as f_stored:
stored = Image.open(f_stored)
original = Image.open(image)
assert stored.size == original.size
def test_save_optimize_settings(self, app, storage, resource, image):
app.config['FS_IMAGES_OPTIMIZE'] = True
class Tester(db.Document):
image = ImageField(fs=storage)
filename = 'flask.{0}'.format(self.ext)
filename_original = 'flask-original.{0}'.format(self.ext)
tester = Tester()
tester.image.save(resource)
tester.validate()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.original == filename_original
assert filename in storage
assert tester.to_mongo() == {
'image': {
'filename': filename,
'original': filename_original,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == filename
assert tester.image.original == filename_original
path_original = storage.path(filename_original)
path_optimized = storage.path(filename)
with open(path_original, 'rb') as f_orig:
with open(path_optimized, 'rb') as f_optimized:
source = Image.open(image)
original = Image.open(f_orig)
optimized = Image.open(f_optimized)
assert original.size == source.size
assert optimized.size == source.size
assert os.stat(path_optimized).st_size < os.stat(path_original).st_size
def test_save_optimize_attribute(self, app, storage, resource, image):
class Tester(db.Document):
image = ImageField(fs=storage, optimize=True)
filename = 'flask.{0}'.format(self.ext)
filename_original = 'flask-original.{0}'.format(self.ext)
tester = Tester()
tester.image.save(resource)
tester.validate()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.original == filename_original
assert filename in storage
assert tester.to_mongo() == {
'image': {
'filename': filename,
'original': filename_original,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == filename
assert tester.image.original == filename_original
path_original = storage.path(filename_original)
path_optimized = storage.path(filename)
with open(path_original, 'rb') as f_orig:
with open(path_optimized, 'rb') as f_optimized:
source = Image.open(image)
original = Image.open(f_orig)
optimized = Image.open(f_optimized)
assert original.size == source.size
assert optimized.size == source.size
assert os.stat(path_optimized).st_size < os.stat(path_original).st_size
def test_save_max_size(self, storage, resource, image):
max_size = 150
class Tester(db.Document):
image = ImageField(fs=storage, max_size=max_size)
filename = 'flask.{0}'.format(self.ext)
filename_original = 'flask-original.{0}'.format(self.ext)
tester = Tester()
tester.image.save(resource)
tester.validate()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.original == filename_original
assert filename in storage
assert tester.to_mongo() == {
'image': {
'filename': filename,
'original': filename_original,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == filename
assert tester.image.original == filename_original
with open(storage.path(filename_original), 'rb') as f_orig:
with open(storage.path(filename), 'rb') as f_resized:
source = Image.open(image)
original = Image.open(f_orig)
resized = Image.open(f_resized)
assert original.size == source.size
assert resized.size[0] <= max_size
assert resized.size[1] <= max_size
resized_ratio = resized.size[0] / resized.size[1]
source_ratio = source.size[0] / source.size[1]
assert resized_ratio == pytest.approx(source_ratio, 1)
def test_save_thumbnails(self, storage, image, resource):
sizes = [150, 32]
class Tester(db.Document):
image = ImageField(fs=storage, thumbnails=sizes)
filename = 'flask.{0}'.format(self.ext)
filename_150 = 'flask-150.{0}'.format(self.ext)
filename_32 = 'flask-32.{0}'.format(self.ext)
tester = Tester()
tester.image.save(resource)
tester.validate()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.original == filename
assert tester.image.thumbnail(32) == filename_32
assert tester.image.thumbnail(150) == filename_150
with pytest.raises(ValueError):
tester.image.thumbnail(200)
assert filename in storage
assert filename_32 in storage
assert filename_150 in storage
assert tester.to_mongo() == {
'image': {
'filename': filename,
'thumbnails': {
'32': filename_32,
'150': filename_150,
},
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == filename
assert tester.image.original == filename
assert tester.image.thumbnail(32) == filename_32
assert tester.image.thumbnail(150) == filename_150
with open(storage.path(filename), 'rb') as f_orig:
with open(storage.path(filename_32), 'rb') as f_32:
with open(storage.path(filename_150), 'rb') as f_150:
source = Image.open(image)
original = Image.open(f_orig)
thumb_32 = Image.open(f_32)
thumb_150 = Image.open(f_150)
assert original.size == source.size
assert thumb_32.size <= (32, 32)
assert thumb_150.size <= (150, 150)
def test_save_thumbnails_with_bbox(self, storage, resource, image):
sizes = [150, 32]
bbox = (10, 10, 100, 100)
filename = 'flask.{0}'.format(self.ext)
filename_150 = 'flask-150.{0}'.format(self.ext)
filename_32 = 'flask-32.{0}'.format(self.ext)
class Tester(db.Document):
image = ImageField(fs=storage, thumbnails=sizes)
tester = Tester()
tester.image.save(resource, bbox=bbox)
tester.validate()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.original == filename
assert tester.image.thumbnail(32) == filename_32
assert tester.image.thumbnail(150) == filename_150
# self.assertSequenceEqual(tester.image.bbox, bbox)
assert tester.image.bbox == bbox
with pytest.raises(ValueError):
tester.image.thumbnail(200)
assert filename in storage
assert filename_32 in storage
assert filename_150 in storage
assert tester.to_mongo() == {
'image': {
'filename': filename,
'bbox': (10, 10, 100, 100),
'thumbnails': {
'32': filename_32,
'150': filename_150,
},
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == filename
assert tester.image.original == filename
assert tester.image.thumbnail(32) == filename_32
assert tester.image.thumbnail(150) == filename_150
assert tuple(tester.image.bbox) == tuple(bbox)
# self.assertSequenceEqual(tester.image.bbox, bbox)
# with image as f:
with open(storage.path(filename), 'rb') as f_orig:
with open(storage.path(filename_32), 'rb') as f_32:
with open(storage.path(filename_150), 'rb') as f_150:
source = Image.open(image)
original = Image.open(f_orig)
thumb_32 = Image.open(f_32)
thumb_150 = Image.open(f_150)
assert original.size == source.size
assert thumb_32.size <= (32, 32)
assert thumb_150.size <= (150, 150)
def test_save_with_two_fields(self, storage, resource):
sizes = [32]
bbox = (10, 10, 100, 100)
filename = 'flask.{0}'.format(self.ext)
filename_32 = 'flask-32.{0}'.format(self.ext)
filename2 = 'flask2.{0}'.format(self.ext)
class Tester(db.Document):
image = ImageField(fs=storage, thumbnails=sizes)
image2 = ImageField(fs=storage)
tester = Tester()
tester.image.save(resource, bbox=bbox)
tester.image2.save(resource, filename='flask2.{0}'.format(self.ext))
tester.validate()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.thumbnail(32) == filename_32
assert tuple(tester.image.bbox) == tuple(bbox)
assert tester.image2
assert str(tester.image2) == tester.image2.url
assert tester.image2.filename == filename2
assert tester.image2.bbox is None
assert filename in storage
assert filename_32 in storage
assert filename2 in storage
assert tester.to_mongo() == {
'image': {
'filename': filename,
'bbox': (10, 10, 100, 100),
'thumbnails': {
'32': filename_32,
},
},
'image2': {
'filename': filename2,
}
}
def test_save_and_update(self, storage, resource):
sizes = [150, 32]
bbox = (10, 10, 100, 100)
filename = 'flask.{0}'.format(self.ext)
filename_150 = 'flask-150.{0}'.format(self.ext)
filename_32 = 'flask-32.{0}'.format(self.ext)
class Tester(db.Document):
image = ImageField(fs=storage, thumbnails=sizes)
tester = Tester.objects.create()
tester.image.save(resource, bbox=bbox)
assert tester._changed_fields == ['image']
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == filename
assert tester.image.original == filename
assert tester.image.thumbnail(32) == filename_32
assert tester.image.thumbnail(150) == filename_150
assert tuple(tester.image.bbox) == tuple(bbox)
def test_best_match(self, storage, resource):
sizes = [150, 32]
# filename = 'flask.{0}'.format(self.ext)
filename_150 = 'flask-150.{0}'.format(self.ext)
filename_32 = 'flask-32.{0}'.format(self.ext)
filename2 = 'flask2.{0}'.format(self.ext)
class Tester(db.Document):
image = ImageField(fs=storage, thumbnails=sizes)
image2 = ImageField(fs=storage)
tester = Tester()
assert tester.image(150) is None
assert tester.image.best_url() is None
tester.image.save(resource)
tester.image2.save(resource, filename2)
assert tester.image.best_url(150) == storage.url(filename_150)
assert tester.image.best_url(140) == storage.url(filename_150)
assert tester.image.best_url(100) == storage.url(filename_150)
assert tester.image.best_url(32) == storage.url(filename_32)
assert tester.image.best_url(30) == storage.url(filename_32)
assert tester.image.best_url(160) == storage.url(filename_150)
assert tester.image.best_url() == storage.url(filename_150)
assert tester.image(150) == storage.url(filename_150)
assert tester.image(140) == storage.url(filename_150)
assert tester.image(160) == storage.url(filename_150)
assert tester.image2.best_url(150) == storage.url(filename2)
assert tester.image2.best_url() == storage.url(filename2)
def test_full(self, storage, resource):
max_size = 150
class Tester(db.Document):
image = ImageField(fs=storage, max_size=max_size)
filename = 'flask.{0}'.format(self.ext)
tester = Tester()
assert tester.image.full() is None
assert tester.image.full(external=True) is None
tester.image.save(resource)
assert tester.image.full() == storage.url(filename)
assert tester.image.full(external=True) == storage.url(filename, external=True)
def test_save_with_upload_to(self, storage, resource):
upload_to = 'prefix'
class Tester(db.Document):
image = ImageField(fs=storage, upload_to=upload_to)
filename = 'flask.{0}'.format(self.ext)
tester = Tester()
tester.image.save(resource)
tester.validate()
expected_filename = '/'.join([upload_to, filename])
assert tester.image
assert tester.image.filename == expected_filename
assert expected_filename in storage
assert tester.to_mongo() == {
'image': {
'filename': expected_filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == expected_filename
def test_save_with_callable_upload_to(self, storage, resource):
upload_to = 'prefix'
class Tester(db.Document):
image = ImageField(fs=storage, upload_to=lambda o: upload_to)
filename = 'flask.{0}'.format(self.ext)
tester = Tester()
tester.image.save(resource)
tester.validate()
expected_filename = '/'.join([upload_to, filename])
assert tester.image
assert tester.image.filename == expected_filename
assert expected_filename in storage
assert tester.to_mongo() == {
'image': {
'filename': expected_filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == expected_filename
def test_save_with_callable_basename(self, storage, resource):
class Tester(db.Document):
image = ImageField(fs=storage, basename=lambda o: 'prefix/filename')
tester = Tester()
tester.image.save(resource)
tester.validate()
expected_filename = 'prefix/filename.{0}'.format(self.ext)
assert tester.image
assert tester.image.filename == expected_filename
assert expected_filename in storage
assert tester.to_mongo() == {
'image': {
'filename': expected_filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == expected_filename
def test_save_with_callable_basename_override(self, storage, resource):
class Tester(db.Document):
image = ImageField(fs=storage, basename=lambda o: 'prefix/filename')
expected_filename = 'other.{0}'.format(self.ext)
tester = Tester()
tester.image.save(resource, expected_filename)
tester.validate()
assert tester.image
assert tester.image.filename == expected_filename
assert expected_filename in storage
assert tester.to_mongo() == {
'image': {
'filename': expected_filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == expected_filename
def test_rerender(self, app, storage, resource, image):
class Tester(db.Document):
image = ImageField(fs=storage, optimize=True)
filename = 'flask.{0}'.format(self.ext)
filename_original = 'flask-original.{0}'.format(self.ext)
storage.write(filename, image)
tester = Tester()
tester.image.filename = filename
assert tester.to_mongo() == {
'image': {
'filename': filename,
}
}
tester.image.rerender()
tester.save().reload()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.original == filename_original
assert filename in storage
assert tester.to_mongo() == {
'_id': tester.pk,
'image': {
'filename': filename,
'original': filename_original,
}
}
path_original = storage.path(filename_original)
path_optimized = storage.path(filename)
with open(path_original, 'rb') as f_orig:
with open(path_optimized, 'rb') as f_optimized:
source = Image.open(image)
original = Image.open(f_orig)
optimized = Image.open(f_optimized)
assert original.size == source.size
assert optimized.size == source.size
assert os.stat(path_optimized).st_size < os.stat(path_original).st_size
def test_rerender_multiple(self, app, storage, resource, image):
class Tester(db.Document):
image = ImageField(fs=storage, max_size=100, optimize=True)
filename = 'flask.{0}'.format(self.ext)
filename_original = 'flask-original.{0}'.format(self.ext)
storage.write(filename_original, image)
tester = Tester()
tester.image.original = filename_original
tester.image.filename = filename
assert tester.to_mongo() == {
'image': {
'original': filename_original,
'filename': filename,
}
}
tester.image.rerender()
tester.save().reload()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.original == filename_original
assert filename in storage
assert tester.to_mongo() == {
'_id': tester.pk,
'image': {
'filename': filename,
'original': filename_original,
}
}
path_original = storage.path(filename_original)
path_optimized = storage.path(filename)
with open(path_original, 'rb') as f_orig:
with open(path_optimized, 'rb') as f_optimized:
source = Image.open(image)
original = Image.open(f_orig)
optimized = Image.open(f_optimized)
assert original.size == source.size
assert optimized.size[0] == 100
assert os.stat(path_optimized).st_size < os.stat(path_original).st_size
class ImageFieldPngTest(ImageFieldTestMixin):
ext = 'png'
@pytest.fixture
def image(self, pngfile):
with open(pngfile, 'rb') as f:
yield f
class ImageFieldJpgTest(ImageFieldTestMixin):
ext = 'jpg'
@pytest.fixture
def image(self, jpgfile):
with open(jpgfile, 'rb') as f:
yield f
| 32.687643
| 91
| 0.58931
| 3,104
| 28,569
| 5.309923
| 0.048647
| 0.102657
| 0.087671
| 0.029729
| 0.87999
| 0.852627
| 0.821866
| 0.80961
| 0.801177
| 0.761315
| 0
| 0.021044
| 0.303056
| 28,569
| 873
| 92
| 32.725086
| 0.80674
| 0.00546
| 0
| 0.737537
| 0
| 0
| 0.040024
| 0
| 0
| 0
| 0
| 0
| 0.31085
| 1
| 0.043988
| false
| 0
| 0.010264
| 0.001466
| 0.104106
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 7 |
b436efddde2ce58b5096ef271610f8580a85684b | 197 | py | Python | tests/test_constants.py | dynamist/phabfive | 831e0e7f38d6299785157722153dc38cbbb6f29a | ["Apache-2.0"] | 1 | 2018-10-24T08:53:58.000Z | 2018-10-24T08:53:58.000Z | tests/test_constants.py | dynamist/phabfive | 831e0e7f38d6299785157722153dc38cbbb6f29a | ["Apache-2.0"] | 26 | 2018-10-24T08:33:09.000Z | 2022-03-17T09:24:49.000Z | tests/test_constants.py | dynamist/phabfive | 831e0e7f38d6299785157722153dc38cbbb6f29a | ["Apache-2.0"] | 1 | 2018-10-24T11:09:40.000Z | 2018-10-24T11:09:40.000Z |
# -*- coding: utf-8 -*-
def test_status_choices():
    from phabfive.constants import REPO_STATUS_CHOICES
    assert "active" in REPO_STATUS_CHOICES
    assert "inactive" in REPO_STATUS_CHOICES
| 21.888889
| 54
| 0.741117
| 26
| 197
| 5.307692
| 0.615385
| 0.376812
| 0.369565
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006135
| 0.172589
| 197
| 8
| 55
| 24.625
| 0.840491
| 0.106599
| 0
| 0
| 0
| 0
| 0.08046
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0 | 8 |
b484fd17d97c04021a04358e791c9f20bd1006bb | 1,997 | py | Python | search_binary/solution.py | kevinzen/learning | 148129a1ec48e86e74c6ed244ba50ab682ebf00b | ["MIT"] | null | null | null | search_binary/solution.py | kevinzen/learning | 148129a1ec48e86e74c6ed244ba50ab682ebf00b | ["MIT"] | null | null | null | search_binary/solution.py | kevinzen/learning | 148129a1ec48e86e74c6ed244ba50ab682ebf00b | ["MIT"] | null | null | null |
class Solution(object):
    # Note: the names list, min and max shadow Python builtins; kept as in the
    # original, but renaming them would be cleaner.
    def binary_search(self, list, target):
        length = len(list)
        min = 0
        max = length - 1
        if length == 0:
            return -1
        # [2, 3, 4, 6, 9, 11, 12, 17, 18]
        # 1: min = 0, max = 7, m = 4, list[m] = 9
        # 2: min = 4, max = 7, m = 5, list[m] = 11
        while max >= min:
            m = (min + max) // 2  # floor
            if list[m] < target:
                min = m + 1
            elif list[m] > target:
                max = m - 1
            else:
                return m
        return -1  # not found

    def binary_search_leftmost(self, list, target):
        length = len(list)
        min = 0
        max = length
        if length == 0:
            return -1
        # [2, 3, 4, 6, 9, 11, 12, 17, 18]
        # 1: min = 0, max = 8, m = 4, list[m] = 9
        # 2: min = 5, max = 8, m = 6, list[m] = 12
        # 3: min = 5, max = 6, m = 5, list[m] = 11
        # 4: min = 5, max = 5, m = 5, list[m] = 11
        while max > min:
            m = (min + max) // 2  # floor
            if list[m] < target:
                min = m + 1
            else:
                max = m
        # min has converged on the leftmost candidate; m may lag one position behind,
        # so check list[min] rather than list[m]
        if min < length and list[min] == target:
            return min
        else:
            return -1  # not found

    def binary_search_rightmost(self, list, target):
        length = len(list)
        min = 0
        max = length
        if length == 0:
            return -1
        # [2, 3, 4, 6, 9, 11, 12, 17, 18]
        # 1: min = 0, max = 8, m = 4, list[m] = 9
        # 2: min = 5, max = 8, m = 6, list[m] = 12
        # 3: min = 5, max = 6, m = 5, list[m] = 11
        # 4: min = 6, max = 6, m = 5
        while max > min:
            m = (min + max) // 2
            if list[m] > target:
                max = m
            else:
                min = m + 1
        # min has converged just past the rightmost candidate; check the element before it
        if min > 0 and list[min - 1] == target:
            return min - 1
        else:
            return -1  # not found
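# Example usage (hypothetical, not part of the original file):
#   Solution().binary_search([2, 3, 4, 6, 9, 11, 12, 17, 18], 11)  # -> 5
#   Solution().binary_search_leftmost([1, 2, 2, 3], 2)             # -> 1
#   Solution().binary_search_rightmost([1, 2, 2, 3], 2)            # -> 2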
| 24.654321
| 52
| 0.375063
| 276
| 1,997
| 2.695652
| 0.137681
| 0.100806
| 0.056452
| 0.087366
| 0.848118
| 0.807796
| 0.807796
| 0.705645
| 0.705645
| 0.705645
| 0
| 0.11276
| 0.493741
| 1,997
| 80
| 53
| 24.9625
| 0.623145
| 0.280421
| 0
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0
| 0
| 0.270833
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 7 |
81f15bfa731c54f40f2d53582669abf996dc1405 | 120 | py | Python | module2/string2.py | zubrik13/stepic_python | 72def2a2c2d45d8ff47a94a6ba6bc4936459046d | ["MIT"] | null | null | null | module2/string2.py | zubrik13/stepic_python | 72def2a2c2d45d8ff47a94a6ba6bc4936459046d | ["MIT"] | null | null | null | module2/string2.py | zubrik13/stepic_python | 72def2a2c2d45d8ff47a94a6ba6bc4936459046d | ["MIT"] | null | null | null |
s = 'abcdefghijk'
print(s[3:6])
print(s[:6])
print(s[3:])
print(s[::-1])
print(s[-3:])
print(s[:-6])
print(s[-1:-10:-2])
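# Expected output, one slice per line:
# def, abcdef, defghijk, kjihgfedcba, ijk, abcde, kigec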
| 15
| 19
| 0.541667
| 26
| 120
| 2.5
| 0.307692
| 0.646154
| 0.323077
| 0.369231
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0.099099
| 0.075
| 120
| 8
| 19
| 15
| 0.486486
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.875
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0 | 7 |
81fd6fd5e982627b21efb398c1be7d0f910b9f75 | 3,473 | py | Python | python/tests/generated/errors/validation/test_missing_fieldset_entry.py | eno-lang/enolib | 4175f7c1e8246493b6758c29bddc80d20eaf15f7 | ["MIT"] | 17 | 2019-04-15T21:03:37.000Z | 2022-01-24T11:03:34.000Z | python/tests/generated/errors/validation/test_missing_fieldset_entry.py | eno-lang/enolib | 4175f7c1e8246493b6758c29bddc80d20eaf15f7 | ["MIT"] | 20 | 2019-03-13T23:23:40.000Z | 2022-03-29T13:40:57.000Z | python/tests/generated/errors/validation/test_missing_fieldset_entry.py | eno-lang/enolib | 4175f7c1e8246493b6758c29bddc80d20eaf15f7 | ["MIT"] | 4 | 2019-04-15T21:18:03.000Z | 2019-09-21T16:18:10.000Z |
import enolib
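# Each test feeds a small eno document to the parser and asserts on the ValidationError that required_entry() raises.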
def test_querying_an_empty_fieldset_for_a_required_but_missing_entry_raises_the_expected_validationerror():
    error = None
    input = ("fieldset:")
    try:
        enolib.parse(input).fieldset('fieldset').required_entry('entry')
    except enolib.ValidationError as _error:
        if isinstance(_error, enolib.ValidationError):
            error = _error
        else:
            raise _error
    assert type(error) is enolib.ValidationError
    text = ("The fieldset entry 'entry' is missing - in case it has been specified look for typos and also check for correct capitalization.")
    assert error.text == text
    snippet = (" Line | Content\n"
               " * 1 | fieldset:")
    assert error.snippet == snippet
    assert error.selection['from']['line'] == 0
    assert error.selection['from']['column'] == 9
    assert error.selection['to']['line'] == 0
    assert error.selection['to']['column'] == 9

def test_querying_a_fieldset_with_two_entries_for_a_required_but_missing_entry_raises_the_expected_validationerror():
    error = None
    input = ("fieldset:\n"
             "entry = value\n"
             "entry = value")
    try:
        enolib.parse(input).fieldset('fieldset').required_entry('missing')
    except enolib.ValidationError as _error:
        if isinstance(_error, enolib.ValidationError):
            error = _error
        else:
            raise _error
    assert type(error) is enolib.ValidationError
    text = ("The fieldset entry 'missing' is missing - in case it has been specified look for typos and also check for correct capitalization.")
    assert error.text == text
    snippet = (" Line | Content\n"
               " * 1 | fieldset:\n"
               " ? 2 | entry = value\n"
               " ? 3 | entry = value")
    assert error.snippet == snippet
    assert error.selection['from']['line'] == 0
    assert error.selection['from']['column'] == 9
    assert error.selection['to']['line'] == 0
    assert error.selection['to']['column'] == 9

def test_querying_a_fieldset_with_entries_empty_lines_and_comments_for_a_required_but_missing_entry_raises_the_expected_validationerror():
    error = None
    input = ("fieldset:\n"
             "\n"
             "> comment\n"
             "entry = value\n"
             "\n"
             "> comment\n"
             "entry = value")
    try:
        enolib.parse(input).fieldset('fieldset').required_entry('missing')
    except enolib.ValidationError as _error:
        if isinstance(_error, enolib.ValidationError):
            error = _error
        else:
            raise _error
    assert type(error) is enolib.ValidationError
    text = ("The fieldset entry 'missing' is missing - in case it has been specified look for typos and also check for correct capitalization.")
    assert error.text == text
    snippet = (" Line | Content\n"
               " * 1 | fieldset:\n"
               " ? 2 | \n"
               " ? 3 | > comment\n"
               " ? 4 | entry = value\n"
               " ? 5 | \n"
               " ? 6 | > comment\n"
               " ? 7 | entry = value")
    assert error.snippet == snippet
    assert error.selection['from']['line'] == 0
    assert error.selection['from']['column'] == 9
    assert error.selection['to']['line'] == 0
    assert error.selection['to']['column'] == 9
| 33.394231
| 144
| 0.583357
| 383
| 3,473
| 5.122715
| 0.182768
| 0.100917
| 0.122324
| 0.073395
| 0.925586
| 0.925586
| 0.911315
| 0.911315
| 0.88685
| 0.88685
| 0
| 0.009492
| 0.302332
| 3,473
| 104
| 145
| 33.394231
| 0.800248
| 0
| 0
| 0.792208
| 0
| 0.038961
| 0.263961
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 1
| 0.038961
| false
| 0
| 0.012987
| 0
| 0.051948
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 7 |
c3146853f40bc145c127ac9759899475293aa3f1 | 4,497 | py | Python | tests/test_converters.py | aalekhpatel07/retworkx | ae93fcab17d55bc259476c65a677221b4177870a | ["Apache-2.0"] | 1 | 2021-11-29T23:15:07.000Z | 2021-11-29T23:15:07.000Z | tests/test_converters.py | aalekhpatel07/retworkx | ae93fcab17d55bc259476c65a677221b4177870a | ["Apache-2.0"] | 40 | 2020-08-31T06:09:06.000Z | 2022-03-18T19:02:34.000Z | tests/test_converters.py | aalekhpatel07/retworkx | ae93fcab17d55bc259476c65a677221b4177870a | ["Apache-2.0"] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import retworkx
import networkx
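# Every test follows the same pattern: build a networkx graph, convert it with
# retworkx.networkx_converter, and check that nodes, edges and the multigraph
# flag survive the round trip.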
class TestNetworkxConverter(unittest.TestCase):
    def test_undirected_gnm_graph(self):
        g = networkx.gnm_random_graph(10, 10, seed=42)
        out_graph = retworkx.networkx_converter(g)
        self.assertIsInstance(out_graph, retworkx.PyGraph)
        self.assertEqual(out_graph.nodes(), list(g.nodes))
        self.assertEqual(
            out_graph.weighted_edge_list(), list(g.edges(data=True))
        )
        self.assertEqual(out_graph.multigraph, g.is_multigraph())

    def test_directed_gnm_graph(self):
        g = networkx.gnm_random_graph(10, 10, seed=42, directed=True)
        out_graph = retworkx.networkx_converter(g)
        self.assertIsInstance(out_graph, retworkx.PyDiGraph)
        self.assertEqual(out_graph.nodes(), list(g.nodes))
        self.assertEqual(
            out_graph.weighted_edge_list(), list(g.edges(data=True))
        )
        self.assertEqual(out_graph.multigraph, g.is_multigraph())

    def test_empty_graph(self):
        g = networkx.Graph()
        out_graph = retworkx.networkx_converter(g)
        self.assertIsInstance(out_graph, retworkx.PyGraph)
        self.assertEqual(out_graph.nodes(), list(g.nodes))
        self.assertEqual(
            out_graph.weighted_edge_list(), list(g.edges(data=True))
        )
        self.assertEqual(out_graph.multigraph, g.is_multigraph())

    def test_empty_multigraph(self):
        g = networkx.MultiGraph()
        out_graph = retworkx.networkx_converter(g)
        self.assertIsInstance(out_graph, retworkx.PyGraph)
        self.assertEqual(out_graph.nodes(), list(g.nodes))
        self.assertEqual(
            out_graph.weighted_edge_list(), list(g.edges(data=True))
        )
        self.assertEqual(out_graph.multigraph, g.is_multigraph())

    def test_empty_directed_graph(self):
        g = networkx.DiGraph()
        out_graph = retworkx.networkx_converter(g)
        self.assertIsInstance(out_graph, retworkx.PyDiGraph)
        self.assertEqual(out_graph.nodes(), list(g.nodes))
        self.assertEqual(
            out_graph.weighted_edge_list(), list(g.edges(data=True))
        )
        self.assertEqual(out_graph.multigraph, g.is_multigraph())

    def test_empty_directed_multigraph(self):
        g = networkx.MultiDiGraph()
        out_graph = retworkx.networkx_converter(g)
        self.assertIsInstance(out_graph, retworkx.PyDiGraph)
        self.assertEqual(out_graph.nodes(), list(g.nodes))
        self.assertEqual(
            out_graph.weighted_edge_list(), list(g.edges(data=True))
        )
        self.assertEqual(out_graph.multigraph, g.is_multigraph())

    def test_cubical_graph(self):
        g = networkx.cubical_graph(networkx.Graph)
        out_graph = retworkx.networkx_converter(g)
        self.assertIsInstance(out_graph, retworkx.PyGraph)
        self.assertEqual(out_graph.nodes(), list(g.nodes))
        self.assertEqual(
            out_graph.weighted_edge_list(), list(g.edges(data=True))
        )
        self.assertEqual(out_graph.multigraph, g.is_multigraph())

    def test_cubical_multigraph(self):
        g = networkx.cubical_graph(networkx.MultiGraph)
        out_graph = retworkx.networkx_converter(g)
        self.assertIsInstance(out_graph, retworkx.PyGraph)
        self.assertEqual(out_graph.nodes(), list(g.nodes))
        self.assertEqual(
            out_graph.weighted_edge_list(), list(g.edges(data=True))
        )
        self.assertEqual(out_graph.multigraph, g.is_multigraph())

    def test_random_k_out_graph(self):
        g = networkx.random_k_out_graph(100, 50, 3.14159, True, 42)
        out_graph = retworkx.networkx_converter(g)
        self.assertIsInstance(out_graph, retworkx.PyDiGraph)
        self.assertEqual(out_graph.nodes(), list(g.nodes))
        self.assertEqual(
            out_graph.weighted_edge_list(), list(g.edges(data=True))
        )
        self.assertEqual(out_graph.multigraph, g.is_multigraph())
| 41.638889
| 75
| 0.686902
| 563
| 4,497
| 5.284192
| 0.179396
| 0.126387
| 0.163361
| 0.20874
| 0.766387
| 0.766387
| 0.74958
| 0.74958
| 0.74958
| 0.74958
| 0
| 0.008132
| 0.207027
| 4,497
| 107
| 76
| 42.028037
| 0.826136
| 0.115855
| 0
| 0.635294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.423529
| 1
| 0.105882
| false
| 0
| 0.035294
| 0
| 0.152941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 7 |
c3481116e2bf9a2c0723e55038879b174ad991d1 | 3,316 | py | Python | correlationsMetadata.py | jklewis99/magical-movie-poster-processing | 88aefe4c446fd3d8366b527f59e20c04ac584fb4 | ["MIT"] | 1 | 2020-11-02T17:00:52.000Z | 2020-11-02T17:00:52.000Z | correlationsMetadata.py | jklewis99/magical-movie-poster-processing | 88aefe4c446fd3d8366b527f59e20c04ac584fb4 | ["MIT"] | null | null | null | correlationsMetadata.py | jklewis99/magical-movie-poster-processing | 88aefe4c446fd3d8366b527f59e20c04ac584fb4 | ["MIT"] | 1 | 2022-01-26T19:26:56.000Z | 2022-01-26T19:26:56.000Z |
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pdb
import os
import skimage.io as io
from scipy.stats import pearsonr
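# Note: cv2, pdb, os and skimage.io are imported above but never used in this script.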
# -------------------------- movies-metadata.csv -------------------------------
# Read csv
features = pd.read_csv("data/movies-metadata.csv", thousands=',')
# Drop rows with missing values
features = features.dropna(axis=0)
# Extract values of columns needed
revenue = features["Box_office"].values.astype(float)  # np.float was just an alias of float and is removed in NumPy >= 1.24
imdb = features["imdbVotes"].values.astype(float)
imdbRating = features["imdbRating"].values.astype(float)
# Evaluation Pearson r correlation
correlation, _ = pearsonr(imdb, revenue)
# Figure 1: Shows correlation between columns
plt.figure(1, figsize = (20, 7))
plt.subplot(2, 2, 1)
plt.subplots_adjust(wspace = 0.2, hspace = 0.5)
plt.scatter(imdb, revenue, s = 1, marker = "o", facecolor = "none", edgecolor = "blue")
plt.title("Revenue vs imdb Votes", fontsize=16)
plt.xlabel("imdb Votes", fontsize=14)
plt.ylabel("Revenue", fontsize=14)
plt.annotate(f"Pearson-R = {correlation:.2f}", (np.min(imdb), 0.98*np.max(revenue)), fontsize=12) # plot the value on the graph
# Evaluation Pearson r correlation
correlation2, _ = pearsonr(imdbRating, revenue)
# Figure 2: Shows correlation between columns
plt.subplot(2, 2, 2)
plt.scatter(imdbRating, revenue, s = 1, marker = "o", facecolor = "none", edgecolor = "blue")
plt.title("Revenue vs imdb Rating", fontsize=16)
plt.xlabel("imdb Rating", fontsize=14)
plt.ylabel("Revenue", fontsize=14)
plt.annotate(f"Pearson-R = {correlation2:.2f}", (np.min(imdbRating), 0.98*np.max(revenue)), fontsize=12) # plot the value on the graph
# ------------------------------------------------------------------------------
# -------------------------- movies-metadata-cleaned.csv -----------------------
# Read csv
features = pd.read_csv("data/movies-metadata-cleaned.csv", thousands=',')
# Drop rows with missing values
features = features.dropna(axis=0)
# Extract values of columns needed
revenue = features["Box_office"].values.astype(float)
imdb = features["imdbVotes"].values.astype(float)
imdbRating = features["imdbRating"].values.astype(float)
# Evaluation Pearson r correlation
correlation, _ = pearsonr(imdb, revenue)
# Figure 3: Shows correlation between columns
plt.figure(2, figsize = (20, 7))
plt.subplot(2, 2, 1)
plt.subplots_adjust(wspace = 0.2, hspace = 0.5)
plt.scatter(imdb, revenue, s = 1, marker = "o", facecolor = "none", edgecolor = "blue")
plt.title("Revenue vs imdb Votes", fontsize=16)
plt.xlabel("imdb Votes", fontsize=14)
plt.ylabel("Revenue", fontsize=14)
plt.annotate(f"Pearson-R = {correlation:.2f}", (np.min(imdb), 0.98*np.max(revenue)), fontsize=12) # plot the value on the graph
# Evaluation Pearson r correlation
correlation2, _ = pearsonr(imdbRating, revenue)
# Figure 4: Shows correlation between columns
plt.subplot(2, 2, 2)
plt.scatter(imdbRating, revenue, s = 1, marker = "o", facecolor = "none", edgecolor = "blue")
plt.title("Revenue vs imdb Rating", fontsize=16)
plt.xlabel("imdb Rating", fontsize=14)
plt.ylabel("Revenue", fontsize=14)
plt.annotate(f"Pearson-R = {correlation2:.2f}", (np.min(imdbRating), 0.98*np.max(revenue)), fontsize=12) # plot the value on the graph
# ------------------------------------------------------------------------------
plt.show()
| 37.258427
| 134
| 0.670688
| 453
| 3,316
| 4.887417
| 0.211921
| 0.028907
| 0.046974
| 0.051491
| 0.915989
| 0.915989
| 0.880759
| 0.880759
| 0.880759
| 0.880759
| 0
| 0.030847
| 0.110374
| 3,316
| 88
| 135
| 37.681818
| 0.719661
| 0.256031
| 0
| 0.745098
| 0
| 0
| 0.174304
| 0.022913
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.156863
| 0
| 0.156863
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 7 |
6f2058d9428b0e83dfae6878eb303e40e82b5f24 | 157 | py | Python | engine/__init__.py | ricarhincapie/Torre_Dev | 55327a39bdf12b35dca229d08a62c1c7a8549a02 | ["CC0-1.0"] | null | null | null | engine/__init__.py | ricarhincapie/Torre_Dev | 55327a39bdf12b35dca229d08a62c1c7a8549a02 | ["CC0-1.0"] | null | null | null | engine/__init__.py | ricarhincapie/Torre_Dev | 55327a39bdf12b35dca229d08a62c1c7a8549a02 | ["CC0-1.0"] | null | null | null |
""" Blueprint for API
"""
from flask import Blueprint
app_views = Blueprint('app_views', __name__, url_prefix='/api/v1')
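# Importing the views module registers its routes on app_views as a side effect.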
from api.v1.views.index import *
| 19.625
| 66
| 0.726115
| 23
| 157
| 4.652174
| 0.565217
| 0.224299
| 0.317757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014706
| 0.133758
| 157
| 7
| 67
| 22.428571
| 0.772059
| 0.10828
| 0
| 0
| 0
| 0
| 0.122137
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0 | 7 |
6f30c9e9d3f4d8870acfea79baf51fc176decd77 | 16,390 | py | Python | Code/Tic_Tac_Toe_1.py | codeholmes/Tic-Tac-Toe-Number-Edition | d3f88b5dc632237982e4eed0f61d2b5f0513a5b3 | ["MIT"] | null | null | null | Code/Tic_Tac_Toe_1.py | codeholmes/Tic-Tac-Toe-Number-Edition | d3f88b5dc632237982e4eed0f61d2b5f0513a5b3 | ["MIT"] | 1 | 2020-09-05T05:21:02.000Z | 2020-09-05T05:21:02.000Z | Code/Tic_Tac_Toe_1.py | codeholmes/Tic-Tac-Toe-Number-Edition | d3f88b5dc632237982e4eed0f61d2b5f0513a5b3 | ["MIT"] | null | null | null |
# Author:
# Anish
import math
import sys
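# math is imported but never used; sys provides sys.exit() for ending the game.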
list_1 = [0,0,0]
list_2 = [0,0,0]
list_3 = [0,0,0]
board = [
list_1,
list_2,
list_3
]
non_zero_list = []
groove = []
alarm = 0
print("\n~ Instruction ~\n")
print("(1) Player-1 gets ODD numbers b/w 1 to 9 (i.e. 1, 3, 5, 7, 9),")
print("(2) Player-2 gets EVEN numbers b/w 1 to 9 (i.e. 2, 4, 6, 8),")
print("(3) The player who's three number sum is 15 wins,")
print("(4) Each player will get alternative turn to enter number,")
print("If you enter used/invalid number you will lost your turn,")
print("(5) The number can't be repeated,")
print("(6) First enter your desired tile number between 1 to 9,")
print("(7) Then your number,")
print("(8)The game will go-on until unless one player wins or all the players allotted numbers are used.")
print("(9) Enter Ctrl+C to exit the game!\n")
print("All the best, may the 15 be with you! :D")
print("\n~ Board ~\n")
for item in board:
print(item)
while True:
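# Note: both move handlers are redefined on every pass through this loop; defining them once above the loop would be cleaner.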
def player_1_move():
global player_1_tile, player_1_odd
player_1_tile = int(input("\n(Odd) Player - 1: ENTER TILE NUMBER >> ").strip())
player_1_odd = int(input("(Odd) Player - 1: ENTER ODD NUMBER >> ").strip())
print("\n")
for item in board:
for tile in item:
if tile == player_1_odd:
print("Pls enter number which is not used before! You lost your turn!\n")
player_2_move()
if player_1_odd == 1:
if player_1_tile <= 3 and player_1_tile >= 1:
player_1_tile = player_1_tile -1
if list_1[player_1_tile] == 0:
list_1[player_1_tile] = player_1_odd
#print("Working 1")
print("\n~ Board ~\n")
for item in board:
print(item)
#print("\n")
else:
print("Enter Un-used number!")
elif player_1_tile >= 3 and player_1_tile <= 6:
player_1_tile = player_1_tile -4
if list_2[player_1_tile] == 0:
list_2[player_1_tile] = player_1_odd
#print("Working 3")
for item in board:
print(item)
else:
print("Enter Un-used number!")
elif player_1_tile >= 6 and player_1_tile <= 9:
player_1_tile = player_1_tile -7
if list_3[player_1_tile] == 0:
list_3[player_1_tile] = player_1_odd
#print("Working 9")
for item in board:
print(item)
else:
print("Enter Un-used number!")
elif player_1_odd == 3:
if player_1_tile <= 3 and player_1_tile >= 1:
player_1_tile = player_1_tile -1
if list_1[player_1_tile] == 0:
list_1[player_1_tile] = player_1_odd
#print("Working 1")
for item in board:
print(item)
else:
print("Enter Un-used number!")
elif player_1_tile >= 3 and player_1_tile <= 6:
player_1_tile = player_1_tile -4
if list_2[player_1_tile] == 0:
list_2[player_1_tile] = player_1_odd
#print("Working 3")
for item in board:
print(item)
else:
print("Enter Un-used number!")
elif player_1_tile >= 6 and player_1_tile <= 9:
player_1_tile = player_1_tile -7
if list_3[player_1_tile] == 0:
list_3[player_1_tile] = player_1_odd
#print("Working 9")
for item in board:
print(item)
else:
print("Enter Un-used number!")
elif player_1_odd == 5:
if player_1_tile <= 3 and player_1_tile >= 1:
player_1_tile = player_1_tile -1
if list_1[player_1_tile] == 0:
list_1[player_1_tile] = player_1_odd
#print("Working 1")
for item in board:
print(item)
else:
print("Enter Un-used number!")
elif player_1_tile >= 3 and player_1_tile <= 6:
player_1_tile = player_1_tile -4
if list_2[player_1_tile] == 0:
list_2[player_1_tile] = player_1_odd
#print("Working 3")
for item in board:
print(item)
else:
print("Enter Un-used number!")
elif player_1_tile >= 6 and player_1_tile <= 9:
player_1_tile = player_1_tile -7
if list_3[player_1_tile] == 0:
list_3[player_1_tile] = player_1_odd
#print("Working 9")
for item in board:
print(item)
else:
print("Enter Un-used number!")
elif player_1_odd == 7:
if player_1_tile <= 3 and player_1_tile >= 1:
player_1_tile = player_1_tile -1
if list_1[player_1_tile] == 0:
list_1[player_1_tile] = player_1_odd
#print("Working 1")
for item in board:
print(item)
else:
print("Enter Un-used number!")
elif player_1_tile >= 3 and player_1_tile <= 6:
player_1_tile = player_1_tile -4
if list_2[player_1_tile] == 0:
list_2[player_1_tile] = player_1_odd
#print("Working 3")
for item in board:
print(item)
else:
print("Enter Un-used number!")
elif player_1_tile >= 6 and player_1_tile <= 9:
player_1_tile = player_1_tile -7
if list_3[player_1_tile] == 0:
list_3[player_1_tile] = player_1_odd
#print("Working 9")
for item in board:
print(item)
else:
print("Enter Un-used number!")
elif player_1_odd == 9:
if player_1_tile <= 3 and player_1_tile >= 1:
player_1_tile = player_1_tile -1
if list_1[player_1_tile] == 0:
list_1[player_1_tile] = player_1_odd
#print("Working 3")
for item in board:
print(item)
else:
print("Enter Un-used number!")
elif player_1_tile >= 3 and player_1_tile <= 6:
player_1_tile = player_1_tile -4
if list_2[player_1_tile] == 0:
list_2[player_1_tile] = player_1_odd
#print("Working 5")
for item in board:
print(item)
else:
print("Enter Un-used number!")
elif player_1_tile >= 6 and player_1_tile <= 9:
player_1_tile = player_1_tile -7
if list_3[player_1_tile] == 0:
list_3[player_1_tile] = player_1_odd
#print("Working 9")
for item in board:
print(item)
else:
print("Enter Un-used number!")
elif player_1_odd == 2 or player_1_odd == 4 or player_1_odd == 6 or player_1_odd == 8:
print("Oops, you entered even number, you lost your turn!")
for item in board:
print(item)
else:
print("Oops, your entered number is invalid, you lost your turn!")
for item in board:
print(item)
def player_2_move():
global player_2_tile, player_2_even
player_2_tile = int(input("\n(Even) Player - 2: ENTER TILE NUMBER >> ").strip())
player_2_even = int(input("(Even) Player - 2: ENTER EVEN NUMBER >> ").strip())
print("\n")
for item in board:
for tile in item:
if tile == player_2_even:
print("Pls enter number which is not used before! You lost your turn!")
player_1_move()
if player_2_even == 2:
if player_2_tile <= 3 and player_2_tile >= 1:
player_2_tile = player_2_tile -1
if list_1[player_2_tile] == 0:
list_1[player_2_tile] = player_2_even
#print("Working 1")
for item in board:
print(item)
else:
print("Enter number in empty tile only! You lost your turn!")
player_1_move()
elif player_2_tile >= 3 and player_2_tile <= 6:
player_2_tile = player_2_tile -4
if list_2[player_2_tile] == 0:
list_2[player_2_tile] = player_2_even
#print("Working 3")
for item in board:
print(item)
else:
print("Enter number in empty tile only! You lost your turn!")
player_1_move()
elif player_2_tile >= 6 and player_2_tile <= 9:
player_2_tile = player_2_tile -7
if list_3[player_2_tile] == 0:
list_3[player_2_tile] = player_2_even
#print("Working 9")
for item in board:
print(item)
else:
print("Enter number in empty tile only! You lost your turn!")
player_1_move()
elif player_2_even == 4:
if player_2_tile <= 3 and player_2_tile >= 1:
player_2_tile = player_2_tile -1
if list_1[player_2_tile] == 0:
list_1[player_2_tile] = player_2_even
#print("Working 1")
for item in board:
print(item)
else:
print("Enter number in empty tile only! You lost your turn!")
player_1_move()
elif player_2_tile >= 3 and player_2_tile <= 6:
player_2_tile = player_2_tile -4
if list_2[player_2_tile] == 0:
list_2[player_2_tile] = player_2_even
#print("Working 3")
for item in board:
print(item)
else:
print("Enter Un-used number!")
elif player_2_tile >= 6 and player_2_tile <= 9:
player_2_tile = player_2_tile -7
if list_3[player_2_tile] == 0:
list_3[player_2_tile] = player_2_even
#print("Working 9")
for item in board:
print(item)
else:
print("Enter number in empty tile only! You lost your turn!")
player_1_move()
elif player_2_even == 6:
if player_2_tile <= 3 and player_2_tile >= 1:
player_2_tile = player_2_tile -1
if list_1[player_2_tile] == 0:
list_1[player_2_tile] = player_2_even
#print("Working 1")
for item in board:
print(item)
else:
print("Enter number in empty tile only! You lost your turn!")
player_1_move()
elif player_2_tile >= 3 and player_2_tile <= 6:
player_2_tile = player_2_tile -4
if list_2[player_2_tile] == 0:
list_2[player_2_tile] = player_2_even
#print("Working 3")
for item in board:
print(item)
else:
print("Enter number in empty tile only! You lost your turn!")
player_1_move()
elif player_2_tile >= 6 and player_2_tile <= 9:
player_2_tile = player_2_tile -7
if list_3[player_2_tile] == 0:
list_3[player_2_tile] = player_2_even
#print("Working 9")
for item in board:
print(item)
else:
print("Enter number in empty tile only! You lost your turn!")
player_1_move()
elif player_2_even == 8:
if player_2_tile <= 3 and player_2_tile >= 1:
player_2_tile = player_2_tile -1
if list_1[player_2_tile] == 0:
list_1[player_2_tile] = player_2_even
#print("Working 1")
for item in board:
print(item)
else:
print("Enter number in empty tile only! You lost your turn!")
player_1_move()
elif player_2_tile >= 3 and player_2_tile <= 6:
player_2_tile = player_2_tile -4
if list_2[player_2_tile] == 0:
list_2[player_2_tile] = player_2_even
#print("Working 3")
for item in board:
print(item)
else:
print("Enter number in empty tile only! You lost your turn!")
player_1_move()
elif player_2_tile >= 6 and player_2_tile <= 9:
player_2_tile = player_2_tile -7
if list_3[player_2_tile] == 0:
list_3[player_2_tile] = player_2_even
#print("Working 9")
for item in board:
print(item)
else:
print("Enter number in empty tile only! You lost your turn!")
player_1_move()
elif player_2_even == 1 or player_2_even == 3 or player_2_even == 5 or player_2_even == 7 or player_2_even == 9:
print("Oops, you entered odd number, you lost your turn!")
else:
print("Oops, your entered number is invalid, you lost your turn!")
def bingo_algo(winner):
if sum(list_1) == 15 and list_1[0] != 0 and list_1[1] != 0 and list_1[2] != 0:
print(winner)
sys.exit()
elif sum(list_2) == 15 and list_2[0] != 0 and list_2[1] != 0 and list_2[2] != 0:
print(winner)
sys.exit()
elif sum(list_3) == 15 and list_3[0] != 0 and list_3[1] != 0 and list_3[2] != 0:
print(winner)
sys.exit()
elif list_1[0] + list_2[0] + list_3[0] == 15 and list_1[0] != 0 and list_2[0] != 0 and list_3[0] != 0:
print(winner)
sys.exit()
elif list_1[1] + list_2[1] + list_3[1] == 15 and list_1[1] != 0 and list_2[1] != 0 and list_3[1] != 0:
print(winner)
sys.exit()
elif list_1[2] + list_2[2] + list_3[2] == 15 and list_1[2] != 0 and list_2[2] != 0 and list_3[2] != 0:
print(winner)
sys.exit()
elif list_1[0] + list_2[1] + list_3[2] == 15 and list_1[0] != 0 and list_2[1] != 0 and list_3[2] != 0:
print(winner)
sys.exit()
elif list_1[2] + list_2[1] + list_3[0] == 15 and list_1[2] != 0 and list_2[1] != 0 and list_3[0] != 0:
print(winner)
sys.exit()
def no_winner():
# reset the module-level scratch lists so earlier calls do not skew the tie check
del groove[:]
del non_zero_list[:]
alarm = 0
for lists in board:
for tiles in lists:
groove.append(tiles)
for item in groove:
if item != 0:
non_zero_list.append(item)
alarm = 1
if alarm == 1 and len(non_zero_list) == 9:
print("\n TIE! TIE!! TIE!!!")
sys.exit()
player_1_move()
bingo_algo("\nPlayer-1 Wins!\n")
no_winner()
player_2_move()
bingo_algo("\nPlayer-2 Wins!\n")
no_winner()
| 37.420091
| 115
| 0.473337
| 2,135
| 16,390
| 3.361124
| 0.054333
| 0.137542
| 0.141026
| 0.06243
| 0.837375
| 0.81898
| 0.812988
| 0.805323
| 0.790273
| 0.776059
| 0
| 0.067043
| 0.437584
| 16,390
| 438
| 116
| 37.420091
| 0.711434
| 0.031117
| 0
| 0.788235
| 0
| 0.008824
| 0.131356
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011765
| false
| 0
| 0.005882
| 0
| 0.017647
| 0.258824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 7 |
6f54f5f0b038f170b7e3ebca33070f29112ff889 | 4,578 | py | Python | test/test_model.py | Humboldt155/DeepRecommender | 406735733d38e1bd996b7ff918428e0685fddf2d | ["MIT"] | 1 | 2019-01-10T22:30:41.000Z | 2019-01-10T22:30:41.000Z | test/test_model.py | Humboldt155/DeepRecommender | 406735733d38e1bd996b7ff918428e0685fddf2d | ["MIT"] | null | null | null | test/test_model.py | Humboldt155/DeepRecommender | 406735733d38e1bd996b7ff918428e0685fddf2d | ["MIT"] | null | null | null |
# Copyright (c) 2017 NVIDIA Corporation
import unittest
import sys
import torch.optim as optim
from torch.autograd import Variable
from reco_encoder.data.input_layer import UserItemRecDataProvider
from reco_encoder.model.model import AutoEncoder, MSEloss
sys.path.append('data')
sys.path.append('model')
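# These smoke tests train small autoencoders for a few epochs and only verify that the training loop runs, printing the loss as it goes.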
class iRecAutoEncoderTest(unittest.TestCase):
def test_CPU(self):
print("iRecAutoEncoderTest Test on CPU started")
params = {}
params['batch_size'] = 64
params['data_dir'] = 'test/testData_iRec'
data_layer = UserItemRecDataProvider(params=params)
print("Vector dim: {}".format(data_layer.vector_dim))
print("Total items found: {}".format(len(data_layer.data.keys())))
self.assertTrue(len(data_layer.data.keys())>0)
encoder = AutoEncoder(layer_sizes=[data_layer.vector_dim, 256, 128], is_constrained=True)
print(encoder)
print(encoder.parameters())
optimizer = optim.SGD(encoder.parameters(), lr=0.01, momentum=0.9)
for epoch in range(20):
for i, mb in enumerate(data_layer.iterate_one_epoch()):
inputs = Variable(mb.to_dense())
optimizer.zero_grad()
outputs = encoder(inputs)
loss, num_ratings = MSEloss(outputs, inputs)
loss = loss / num_ratings
loss.backward()
optimizer.step()
print('[%d, %5d] loss: %.7f' % (epoch, i, loss.data[0]))
def test_GPU(self):
print("iRecAutoEncoderTest Test on GPU started")
params = {}
params['batch_size'] = 32
params['data_dir'] = 'test/testData_iRec'
data_layer = UserItemRecDataProvider(params=params)
print("Total items found: {}".format(len(data_layer.data.keys())))
self.assertTrue(len(data_layer.data.keys()) > 0)
encoder = AutoEncoder(layer_sizes=[data_layer.vector_dim, 1024, 512, 512, 512, 512, 128])
encoder.cuda()
optimizer = optim.Adam(encoder.parameters())
print(encoder)
for epoch in range(30):
total_epoch_loss = 0.0
denom = 0.0
for i, mb in enumerate(data_layer.iterate_one_epoch()):
inputs = Variable(mb.to_dense().cuda())
optimizer.zero_grad()
outputs = encoder(inputs)
loss, num_ratings = MSEloss(outputs, inputs)
loss = loss / num_ratings
loss.backward()
optimizer.step()
total_epoch_loss += loss.data[0]
denom += 1
print("Total epoch {} loss: {}".format(epoch, total_epoch_loss/denom))
class uRecAutoEncoderTest(unittest.TestCase):
def test_CPU(self):
print("uRecAutoEncoderTest Test on CPU started")
params = {}
params['batch_size'] = 256
params['data_dir'] = 'test/testData_uRec'
data_layer = UserItemRecDataProvider(params=params)
print("Vector dim: {}".format(data_layer.vector_dim))
print("Total items found: {}".format(len(data_layer.data.keys())))
self.assertTrue(len(data_layer.data.keys())>0)
encoder = AutoEncoder(layer_sizes=[data_layer.vector_dim, 128, data_layer.vector_dim])
optimizer = optim.SGD(encoder.parameters(), lr=0.1, momentum=0.9)
for epoch in range(1):
for i, mb in enumerate(data_layer.iterate_one_epoch()):
inputs = Variable(mb.to_dense())
optimizer.zero_grad()
outputs = encoder(inputs)
loss, num_ratings = MSEloss(outputs, inputs)
loss = loss / num_ratings
loss.backward()
optimizer.step()
print('[%d, %5d] loss: %.7f' % (epoch, i, loss.data[0]))
if i == 5: # too much compute for CPU
break
def test_GPU(self):
print("uRecAutoEncoderTest Test on GPU started")
params = {}
params['batch_size'] = 64
params['data_dir'] = 'test/testData_uRec'
data_layer = UserItemRecDataProvider(params=params)
print("Total items found: {}".format(len(data_layer.data.keys())))
self.assertTrue(len(data_layer.data.keys()) > 0)
encoder = AutoEncoder(layer_sizes=[data_layer.vector_dim, 1024, 512, 512, 128])
encoder.cuda()
optimizer = optim.Adam(encoder.parameters())
print(encoder)
for epoch in range(2):
total_epoch_loss = 0.0
denom = 0.0
for i, mb in enumerate(data_layer.iterate_one_epoch()):
inputs = Variable(mb.to_dense().cuda())
optimizer.zero_grad()
outputs = encoder(inputs)
loss, num_ratings = MSEloss(outputs, inputs)
loss = loss / num_ratings
loss.backward()
optimizer.step()
total_epoch_loss += loss.data[0]
denom += 1
print("Total epoch {} loss: {}".format(epoch, total_epoch_loss / denom))
if __name__ == '__main__':
unittest.main()
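The two test classes above use the pre-0.4 PyTorch idioms Variable(...) and loss.data[0]. A minimal sketch, assuming PyTorch >= 0.4 and the same data_layer/encoder/optimizer/MSEloss objects built in test_CPU, of the equivalent training step with the current API:

# Hedged sketch: same loop body as the tests above, without deprecated idioms.
# Assumes data_layer, encoder, optimizer, and MSEloss are built as in test_CPU.
for epoch in range(20):
    for i, mb in enumerate(data_layer.iterate_one_epoch()):
        inputs = mb.to_dense()  # plain tensors carry autograd since 0.4; Variable is a no-op wrapper
        optimizer.zero_grad()
        outputs = encoder(inputs)
        loss, num_ratings = MSEloss(outputs, inputs)
        loss = loss / num_ratings
        loss.backward()
        optimizer.step()
        print('[%d, %5d] loss: %.7f' % (epoch, i, loss.item()))  # .item() replaces .data[0]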
| avg_line_length: 38.470588 | max_line_length: 93 | alphanum_fraction: 0.662516 | (remaining per-file quality-signal columns omitted)
| hexsha: 48dbb0583fea839a7a954739d22c48083866dabc | size: 15,719 | ext: py | lang: Python
| path: boardgamegeek/objects/rpgs.py | repo: zseymour/boardgamegeek | head: dbc1811b1a30c7e4e59d600dcaa22e792f3d1b9f | licenses: ["BSD-3-Clause"] | stars/issues/forks: null
# coding: utf-8
"""
:mod:`boardgamegeek.objects.rpgs` - RPG information
===================================================
.. module:: boardgamegeek.objects.rpgs
:platform: Unix, Windows
:synopsis: classes for storing rpg information
.. moduleauthor:: Cosmin Luță <q4break@gmail.com>
"""
from __future__ import unicode_literals
from .games import BaseGame, BoardGameVideo, BoardGameComment
from ..exceptions import BGGError
from ..utils import DictObject
class RPGGame(BaseGame):
"""
Object containing information about a role-playing game
"""
def __init__(self, data):
self._videos = []
self._videos_ids = set()
for video in data.get("videos", []):
try:
if video["id"] not in self._videos_ids:
self._videos.append(BoardGameVideo(video))
self._videos_ids.add(video["id"])
except KeyError:
raise BGGError("invalid video data")
self._comments = []
for comment in data.get("comments", []):
self.add_comment(comment)
super(RPGGame, self).__init__(data)
def __repr__(self):
return "RPGGame (id: {})".format(self.id)
def add_comment(self, data):
self._comments.append(BoardGameComment(data))
def _format(self, log):
log.info("rpg id : {}".format(self.id))
log.info("rpg name : {}".format(self.name))
log.info("rpg rank : {}".format(self.bgg_rank))
if self.alternative_names:
for i in self.alternative_names:
log.info("alternative name : {}".format(i))
log.info("year published : {}".format(self.year))
log.info("thumbnail : {}".format(self.thumbnail))
log.info("image : {}".format(self.image))
if self.categories:
log.info("categories")
for i in self.categories:
log.info("- {}".format(i))
if self.systems:
log.info("systems")
for i in self.systems:
log.info("- {}".format(i))
if self.genres:
log.info("genres")
for i in self.genres:
log.info("- {}".format(i))
if self.mechanics:
log.info("mechanics")
for i in self.mechanics:
log.info("- {}".format(i))
if self.designers:
log.info("designers")
for i in self.designers:
log.info("- {}".format(i))
if self.artists:
log.info("artists")
for i in self.artists:
log.info("- {}".format(i))
if self.publishers:
log.info("publishers")
for i in self.publishers:
log.info("- {}".format(i))
if self.videos:
log.info("videos")
for v in self.videos:
v._format(log)
log.info("--------")
if self.versions:
log.info("versions")
for v in self.versions:
v._format(log)
log.info("--------")
log.info("users rated game : {}".format(self.users_rated))
log.info("users avg rating : {}".format(self.rating_average))
log.info("users b-avg rating: {}".format(self.rating_bayes_average))
log.info("users commented : {}".format(self.users_commented))
log.info("users owned : {}".format(self.users_owned))
log.info("users wanting : {}".format(self.users_wanting))
log.info("users wishing : {}".format(self.users_wishing))
log.info("users trading : {}".format(self.users_trading))
log.info("ranks : {}".format(self.ranks))
log.info("description : {}".format(self.description))
if self.comments:
for c in self.comments:
c._format(log)
@property
def alternative_names(self):
"""
:return: alternative names
:rtype: list of str
"""
return self._data.get("alternative_names", [])
@property
def description(self):
"""
:return: description
:rtype: str
"""
return self._data.get("description", "")
@property
def systems(self):
"""
:return: systems
:rtype: list of str
"""
return self._data.get("systems", [])
@property
def genres(self):
"""
:return: genres
:rtype: list of str
"""
return self._data.get("genres", [])
@property
def categories(self):
"""
:return: categories
:rtype: list of str
"""
return self._data.get("categories", [])
@property
def comments(self):
return self._comments
@property
def mechanics(self):
"""
:return: mechanics
:rtype: list of str
"""
return self._data.get("mechanics", [])
@property
def designers(self):
"""
:return: designers
:rtype: list of str
"""
return self._data.get("designers", [])
@property
def artists(self):
"""
:return: artists
:rtype: list of str
"""
return self._data.get("artists", [])
@property
def publishers(self):
"""
:return: publishers
:rtype: list of str
"""
return self._data.get("publishers", [])
@property
def users_owned(self):
"""
:return: number of users owning this game
:rtype: integer
:return: ``None`` if n/a
"""
return self._stats.users_owned
@property
def users_trading(self):
"""
:return: number of users trading this game
:rtype: integer
:return: ``None`` if n/a
"""
return self._stats.users_trading
@property
def users_wanting(self):
"""
:return: number of users wanting this game
:rtype: integer
:return: ``None`` if n/a
"""
return self._data.get("wanting")
@property
def users_wishing(self):
"""
:return: number of users wishing for this game
:rtype: integer
:return: ``None`` if n/a
"""
return self._data.get("wishing")
@property
def users_commented(self):
"""
:return: number of user comments
:rtype: integer
:return: ``None`` if n/a
"""
return self._data.get("numcomments")
@property
def rating_num_weights(self):
"""
:return:
:rtype: integer
:return: ``None`` if n/a
"""
return self._stats.rating_num_weights
@property
def rating_average_weight(self):
"""
:return: average weight
:rtype: float
:return: ``None`` if n/a
"""
return self._stats.rating_average_weight
@property
def videos(self):
"""
:return: videos of this game
:rtype: list of :py:class:`boardgamegeek.game.BoardGameVideo`
"""
return self._videos
@property
def versions(self):
"""
:return: versions of this game
:rtype: list of :py:class:`boardgamegeek.game.BoardGameVersion`
"""
return self._versions
class RPGIssue(BaseGame):
"""
    Object containing information about a single issue of a role-playing game periodical
"""
def __init__(self, data):
self._videos = []
self._videos_ids = set()
for video in data.get("videos", []):
try:
if video["id"] not in self._videos_ids:
self._videos.append(BoardGameVideo(video))
self._videos_ids.add(video["id"])
except KeyError:
raise BGGError("invalid video data")
self._comments = []
for comment in data.get("comments", []):
self.add_comment(comment)
self._articles = []
for article in data.get("articles", []):
self.add_article(article)
super(RPGIssue, self).__init__(data)
def __repr__(self):
return "RPGIssue (id: {})".format(self.id)
def add_comment(self, data):
self._comments.append(BoardGameComment(data))
def add_article(self, data):
self._articles.append(RPGIssueArticle(data))
def _format(self, log):
log.info("rpg id : {}".format(self.id))
log.info("rpg name : {}".format(self.name))
log.info("periodical : {}".format(self.magazine))
log.info("issue no. : {}".format(self.issue_number))
log.info("rpg rank : {}".format(self.bgg_rank))
if self.alternative_names:
for i in self.alternative_names:
log.info("alternative name : {}".format(i))
log.info("year published : {}".format(self.year))
log.info("thumbnail : {}".format(self.thumbnail))
log.info("image : {}".format(self.image))
if self.categories:
log.info("categories")
for i in self.categories:
log.info("- {}".format(i))
if self.systems:
log.info("systems")
for i in self.systems:
log.info("- {}".format(i))
if self.genres:
log.info("genres")
for i in self.genres:
log.info("- {}".format(i))
if self.mechanics:
log.info("mechanics")
for i in self.mechanics:
log.info("- {}".format(i))
if self.designers:
log.info("designers")
for i in self.designers:
log.info("- {}".format(i))
if self.artists:
log.info("artists")
for i in self.artists:
log.info("- {}".format(i))
if self.publishers:
log.info("publishers")
for i in self.publishers:
log.info("- {}".format(i))
if self.videos:
log.info("videos")
for v in self.videos:
v._format(log)
log.info("--------")
if self.versions:
log.info("versions")
for v in self.versions:
v._format(log)
log.info("--------")
log.info("users rated game : {}".format(self.users_rated))
log.info("users avg rating : {}".format(self.rating_average))
log.info("users b-avg rating: {}".format(self.rating_bayes_average))
log.info("users commented : {}".format(self.users_commented))
log.info("users owned : {}".format(self.users_owned))
log.info("users wanting : {}".format(self.users_wanting))
log.info("users wishing : {}".format(self.users_wishing))
log.info("users trading : {}".format(self.users_trading))
log.info("ranks : {}".format(self.ranks))
log.info("description : {}".format(self.description))
if self.comments:
for c in self.comments:
c._format(log)
if self.articles:
for a in self.articles:
a._format(log)
@property
def alternative_names(self):
"""
:return: alternative names
:rtype: list of str
"""
return self._data.get("alternative_names", [])
@property
def magazine(self):
return self._data.get("magazine", "")
@property
def issue_number(self):
return self._data.get("issue_number", -1)
@property
def description(self):
"""
:return: description
:rtype: str
"""
return self._data.get("description", "")
@property
def systems(self):
"""
:return: systems
:rtype: list of str
"""
return self._data.get("systems", [])
@property
def genres(self):
"""
:return: genres
:rtype: list of str
"""
return self._data.get("genres", [])
@property
def categories(self):
"""
:return: categories
:rtype: list of str
"""
return self._data.get("categories", [])
@property
def articles(self):
return self._articles
@property
def comments(self):
return self._comments
@property
def mechanics(self):
"""
:return: mechanics
:rtype: list of str
"""
return self._data.get("mechanics", [])
@property
def designers(self):
"""
:return: designers
:rtype: list of str
"""
return self._data.get("designers", [])
@property
def artists(self):
"""
:return: artists
:rtype: list of str
"""
return self._data.get("artists", [])
@property
def publishers(self):
"""
:return: publishers
:rtype: list of str
"""
return self._data.get("publishers", [])
@property
def users_owned(self):
"""
:return: number of users owning this game
:rtype: integer
:return: ``None`` if n/a
"""
return self._stats.users_owned
@property
def users_trading(self):
"""
:return: number of users trading this game
:rtype: integer
:return: ``None`` if n/a
"""
return self._stats.users_trading
@property
def users_wanting(self):
"""
:return: number of users wanting this game
:rtype: integer
:return: ``None`` if n/a
"""
return self._data.get("wanting")
@property
def users_wishing(self):
"""
:return: number of users wishing for this game
:rtype: integer
:return: ``None`` if n/a
"""
return self._data.get("wishing")
@property
def users_commented(self):
"""
:return: number of user comments
:rtype: integer
:return: ``None`` if n/a
"""
return self._data.get("numcomments")
@property
def rating_num_weights(self):
"""
:return:
:rtype: integer
:return: ``None`` if n/a
"""
return self._stats.rating_num_weights
@property
def rating_average_weight(self):
"""
:return: average weight
:rtype: float
:return: ``None`` if n/a
"""
return self._stats.rating_average_weight
@property
def videos(self):
"""
:return: videos of this game
:rtype: list of :py:class:`boardgamegeek.game.BoardGameVideo`
"""
return self._videos
@property
def versions(self):
"""
:return: versions of this game
:rtype: list of :py:class:`boardgamegeek.game.BoardGameVersion`
"""
return self._versions
class RPGIssueArticle(DictObject):
def _format(self, log):
author_string = self.authors[0] if len(self.authors) == 1 else self.authors
log.info("{} by {} on page {}: {}".format(self.type, author_string, self.page, self.description))
@property
    def authors(self):
        # named to match the "authors" key and the self.authors use in _format
        return self._data.get("authors", [])
@property
def type(self):
return self._data.get("type", "")
@property
def description(self):
return self._data.get("description", "")
@property
def page(self):
return self._data.get("page", -1)
| avg_line_length: 27.289931 | max_line_length: 105 | alphanum_fraction: 0.522552 | (remaining per-file quality-signal columns omitted)
| hexsha: 5b1fdd51256adbbc4ec53bebc7d3c401be0f4b16 | size: 11,317 | ext: py | lang: Python
| path: tests/test_suites/test_suite_schema_callback.py | repo: erickvneri/st-schema-python | head: 0fb955d421244c3619f0f46bcf040c47e40c74d1 | licenses: ["MIT"] | stars/issues/forks: null
import pytest
from stschema.schema_callbacks import SchemaCallback
from tests.device_fixture import SchemaDeviceFixture
# URL for testing callbacks against a live endpoint.
# Set custom_test_url below to unskip the real-HTTP test cases at:
# - line 46
# - line 59
# - line 109
# - line 195
custom_test_url = ''
class TestSuiteSchemaCallbacks:
"""
Test Suite on SchemaCallback Interface.
Index:
- TestSchemaCallbackAttributes
- TestAccessTokenResource
    - TestStateCallbackMethod
    - TestDiscoveryCallbackMethod
"""
class TestSchemaCallbackAttributes:
def test_documentation(self):
assert SchemaCallback
assert SchemaCallback.__doc__
def test_public_methods(self):
assert SchemaCallback.access_token_request
assert SchemaCallback.discovery_callback
assert SchemaCallback.state_callback
def test_private_methods(self):
assert SchemaCallback._access_token_request
assert SchemaCallback._discovery_callback
assert SchemaCallback._state_callback
assert SchemaCallback._validate_callback_args
class TestAccessTokenResource:
# Test case on access_token_request
# resource interface.
def test_resource_documentation(self):
assert SchemaCallback.access_token_request
assert SchemaCallback.access_token_request.__doc__
@pytest.mark.skipif(not custom_test_url, reason="no test url provided")
def test_access_token_request(self):
# Test case to check real-time HttpRequests,
# unskip to enable.
assert SchemaCallback.access_token_request(
'client_id',
'client_secret',
'code',
None, # None Refresh Token
'request_id',
custom_test_url
)
@pytest.mark.skipif(not custom_test_url, reason="no test url provided")
def test_refresh_token_request(self):
# Test case to check real-time HttpRequests,
# unskip to enable.
assert SchemaCallback.access_token_request(
'client_id',
'client_secret',
None, # None Code
'refresh_token',
'request_id',
custom_test_url
)
def test_type_error_instances(self):
with pytest.raises(TypeError):
assert SchemaCallback.access_token_request()
with pytest.raises(TypeError):
assert SchemaCallback.access_token_request('client_id')
with pytest.raises(TypeError):
assert SchemaCallback.access_token_request(
'client_id',
'client_secret'
)
with pytest.raises(TypeError):
assert SchemaCallback.access_token_request(
'client_id',
'client_secret',
'code'
)
with pytest.raises(TypeError):
assert SchemaCallback.access_token_request(
'client_id',
'client_secret',
'code',
'request_id'
)
with pytest.raises(TypeError):
assert SchemaCallback.access_token_request(
'client_id',
'client_secret',
'code',
'refresh_token',
'request_id'
)
def test_type_error_url(self):
with pytest.raises(TypeError):
assert SchemaCallback.access_token_request(
'client_id',
'client_secret',
'code',
'refresh_token',
'request_id',
'url'
)
class TestStateCallbackMethod:
# TODO: Review for fixture update
# Partial Fixture to test SchemaDevice
# types inputs on responses.
        # staticmethod so the lambda does not receive self as data_type when
        # it is called through self.state_devices_argument(...)
        state_devices_argument = staticmethod(lambda data_type:
            SchemaCallback.state_callback(
                'access_token',
                'request_id',
                'https://TEST_URL_ARG',
                data_type
            ))
# Test case on SchemaCallback.state_callback
def test_documentation(self):
assert SchemaCallback.state_callback
assert SchemaCallback.state_callback.__doc__
@pytest.mark.skipif(not custom_test_url, reason="no test url provided")
def test_state_callback(self):
# Test case to check real-time HttpRequests,
# unskip to enable.
assert SchemaCallback.state_callback(
'access_token',
'request_id',
custom_test_url,
[SchemaDeviceFixture()]
)
def test_type_error_missing_arguments(self):
with pytest.raises(TypeError):
assert SchemaCallback.state_callback()
with pytest.raises(TypeError):
assert SchemaCallback.state_callback('access_token')
with pytest.raises(TypeError):
assert SchemaCallback.state_callback(
'access_token',
'request_id'
)
with pytest.raises(TypeError):
assert SchemaCallback.state_callback(
'access_token',
'request_id',
'url'
)
def test_type_error_invalid_url(self):
with pytest.raises(TypeError):
assert SchemaCallback.state_callback(
'access_token',
'request_id',
'http://INVALID_URL',
[]
)
def test_type_error_state_devices_argument(self):
with pytest.raises(TypeError):
assert self.state_devices_argument(str('INVALID'))
with pytest.raises(TypeError):
assert self.state_devices_argument(int(10101010))
with pytest.raises(TypeError):
assert self.state_devices_argument(tuple(('INVALID')))
with pytest.raises(TypeError):
assert self.state_devices_argument(dict(key='val'))
with pytest.raises(TypeError):
assert self.state_devices_argument(bytes(b'1010101'))
with pytest.raises(TypeError):
assert self.state_devices_argument(frozenset({'INVALID'}))
with pytest.raises(TypeError):
assert self.state_devices_argument(set({'INVALID'}))
def test_type_error_devices_arg_items(self):
with pytest.raises(TypeError):
assert self.state_devices_argument([str('INVALID')])
with pytest.raises(TypeError):
assert self.state_devices_argument([int(10101010)])
with pytest.raises(TypeError):
assert self.state_devices_argument([tuple(('INVALID'))])
with pytest.raises(TypeError):
assert self.state_devices_argument([dict(key='val')])
with pytest.raises(TypeError):
assert self.state_devices_argument([bytes(b'1010101')])
with pytest.raises(TypeError):
assert self.state_devices_argument([frozenset({'INVALID'})])
with pytest.raises(TypeError):
assert self.state_devices_argument([set({'INVALID'})])
class TestDiscoveryCallbackMethod:
# TODO: Review for fixture update
# Partial Fixture to test SchemaDevice
# types inputs on responses.
        # staticmethod so the lambda does not receive self as data_type when
        # it is called through self.discovery_devices_argument(...)
        discovery_devices_argument = staticmethod(lambda data_type:
            SchemaCallback.discovery_callback(
                'access_token',
                'request_id',
                'https://TEST_URL_ARG',
                data_type
            ))
        # Test case on SchemaCallback.discovery_callback
def test_documentation(self):
assert SchemaCallback.discovery_callback
assert SchemaCallback.discovery_callback.__doc__
@pytest.mark.skipif(not custom_test_url, reason="no test url provided")
def test_discovery_callback(self):
# Test case to check real-time HttpRequests,
# unskip to enable.
assert SchemaCallback.discovery_callback(
'access_token',
'request_id',
custom_test_url,
[SchemaDeviceFixture()]
)
def test_type_error_missing_arguments(self):
with pytest.raises(TypeError):
assert SchemaCallback.discovery_callback('access_token')
with pytest.raises(TypeError):
assert SchemaCallback.discovery_callback(
'access_token',
'request_id'
)
with pytest.raises(TypeError):
assert SchemaCallback.discovery_callback(
'access_token',
'request_id',
'url'
)
def test_type_error_invalid_url(self):
with pytest.raises(TypeError):
assert SchemaCallback.discovery_callback(
'access_token',
'request_id',
'http://INVALID_URL',
[]
)
def test_type_error_discovery_devices_argument(self):
with pytest.raises(TypeError):
assert self.discovery_devices_argument(str('INVALID'))
with pytest.raises(TypeError):
assert self.discovery_devices_argument(int(10101010))
with pytest.raises(TypeError):
assert self.discovery_devices_argument(tuple(('INVALID')))
with pytest.raises(TypeError):
assert self.discovery_devices_argument(dict(key='val'))
with pytest.raises(TypeError):
assert self.discovery_devices_argument(bytes(b'1010101'))
with pytest.raises(TypeError):
assert self.discovery_devices_argument(frozenset({'INVALID'}))
with pytest.raises(TypeError):
assert self.discovery_devices_argument(set({'INVALID'}))
def test_type_error_devices_arg_items(self):
with pytest.raises(TypeError):
assert self.discovery_devices_argument([str('INVALID')])
with pytest.raises(TypeError):
assert self.discovery_devices_argument([int(10101010)])
with pytest.raises(TypeError):
assert self.discovery_devices_argument([tuple(('INVALID'))])
with pytest.raises(TypeError):
assert self.discovery_devices_argument([dict(key='val')])
with pytest.raises(TypeError):
assert self.discovery_devices_argument([bytes(b'1010101')])
with pytest.raises(TypeError):
assert self.discovery_devices_argument([frozenset({'INVALID'})])
with pytest.raises(TypeError):
assert self.discovery_devices_argument([set({'INVALID'})])
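The invalid-type methods above spell out one pytest.raises block per Python type. A minimal sketch, assuming the same SchemaCallback API, of the same checks collapsed with pytest.mark.parametrize:

# Hedged sketch: parametrized equivalent of test_type_error_state_devices_argument.
import pytest
from stschema.schema_callbacks import SchemaCallback

# Each invalid value (anything that is not a list of SchemaDevice) should
# make state_callback raise TypeError.
BAD_DEVICE_ARGS = ['INVALID', 10101010, ('I', 'N'), {'key': 'val'},
                   b'1010101', frozenset({'INVALID'}), {'INVALID'}]

@pytest.mark.parametrize('bad', BAD_DEVICE_ARGS)
def test_state_devices_argument_rejects_invalid_types(bad):
    with pytest.raises(TypeError):
        SchemaCallback.state_callback('access_token', 'request_id',
                                      'https://TEST_URL_ARG', bad)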
| avg_line_length: 39.708772 | max_line_length: 80 | alphanum_fraction: 0.566846 | (remaining per-file quality-signal columns omitted)
| hexsha: 8271f303eda471dfe4ad3cda95efd8299c61172c | size: 6,334 | ext: py | lang: Python
| path: cyder/search/tests/test_dns.py | repo: ngokevin/chili | head: 36c354ac567471d5e36dccf9eea5096c6b02d4b9 | licenses: ["BSD-3-Clause"] | stars: 2 (2019-03-16 to 2022-03-04) | issues: 1 (2020-04-24) | forks: null
from django.test import TestCase, Client
from cyder.cydns.tests.utils import create_fake_zone
from cyder.cydns.ptr.models import PTR
from cyder.cydns.cname.models import CNAME
from cyder.cydns.address_record.models import AddressRecord
from cyder.search.compiler.django_compile import compile_to_django
class SearchDNSTests(TestCase):
def setUp(self):
self.c = Client()
def search(self, query):
res, errors = compile_to_django(query)
return res, errors
def test_integration1(self):
create_fake_zone("wee.wee.mozilla.com", "")
res, error = self.search("wee.wee.mozilla.com")
self.assertFalse(error)
self.assertEqual(len(res['SOA']), 1)
self.assertEqual(len(res['NS']), 1)
self.assertEqual(len(res['DOMAIN']), 1)
create_fake_zone("wee1.wee.mozilla.com", "")
res, error = self.search("wee1.wee.mozilla.com")
self.assertFalse(error)
self.assertEqual(len(res['SOA']), 1)
self.assertEqual(len(res['NS']), 1)
self.assertEqual(len(res['DOMAIN']), 1)
res, error = self.search("wee1.wee.mozilla.com OR "
"wee.wee.mozilla.com")
self.assertFalse(error)
self.assertEqual(len(res['SOA']), 2)
self.assertEqual(len(res['NS']), 2)
self.assertEqual(len(res['DOMAIN']), 2)
res, error = self.search("wee1.wee.mozilla.com type=:SOA")
self.assertFalse(error)
self.assertEqual(len(res['SOA']), 1)
self.assertEqual(len(res['NS']), 0)
self.assertEqual(len(res['DOMAIN']), 0)
res, error = self.search(
"wee1.wee.mozilla.com type=:NS OR "
"wee.wee.mozilla.com type=:DOMAIN")
self.assertFalse(error)
self.assertEqual(len(res['SOA']), 0)
self.assertEqual(len(res['NS']), 1)
self.assertEqual(len(res['DOMAIN']), 1)
def test_integration2(self):
root_domain = create_fake_zone("wee2.wee.mozilla.com", "")
res, error = self.search("wee2.wee.mozilla.com")
self.assertFalse(error)
self.assertEqual(len(res['SOA']), 1)
self.assertEqual(len(res['NS']), 1)
self.assertEqual(len(res['DOMAIN']), 1)
create_fake_zone("1.1.ip6.arpa", "")
res, error = self.search("1.1.ip6.arpa")
self.assertFalse(error)
self.assertEqual(len(res['SOA']), 1)
self.assertEqual(len(res['NS']), 1)
self.assertEqual(len(res['DOMAIN']), 1)
ptr = PTR(name="host1.wee2.wee.mozilla.com", ip_str="1111::",
ip_type="6")
ptr.save()
addr = AddressRecord(label="host1", domain=root_domain, ip_str="11::",
ip_type="6")
addr.save()
res, error = self.search("host1.wee2.wee.mozilla.com")
self.assertFalse(error)
self.assertEqual(len(res['A']), 1)
self.assertEqual(len(res['PTR']), 1)
res, error = self.search("host1.wee2.wee.mozilla.com type=:A")
self.assertFalse(error)
self.assertEqual(len(res['A']), 1)
self.assertEqual(len(res['PTR']), 0)
res, error = self.search("host1.wee2.wee.mozilla.com type=:PTR")
self.assertFalse(error)
self.assertEqual(len(res['A']), 0)
self.assertEqual(len(res['PTR']), 1)
res, error = self.search("host1.wee2.wee.mozilla.com type=:A "
"type=:PTR")
self.assertFalse(error)
self.assertEqual(len(res['A']), 0)
self.assertEqual(len(res['PTR']), 0)
def test_integration3_zone(self):
root_domain = create_fake_zone("wee3.wee.mozilla.com", "")
res, error = self.search("zone=:wee3.wee.mozilla.com")
self.assertFalse(error)
self.assertEqual(len(res['SOA']), 1)
self.assertEqual(len(res['NS']), 1)
cn = CNAME(label="host1", domain=root_domain, target="whop.whop")
cn.save()
res, error = self.search("zone=:wee3.wee.mozilla.com host1")
self.assertFalse(error)
self.assertEqual(len(res['SOA']), 0)
self.assertEqual(len(res['NS']), 0)
self.assertEqual(len(res['CNAME']), 1)
res, error = self.search("zone=:wee3.wee.mozilla.com "
"type=:CNAME")
self.assertFalse(error)
self.assertEqual(len(res['SOA']), 0)
self.assertEqual(len(res['NS']), 0)
self.assertEqual(len(res['CNAME']), 1)
def test_integration4_ip_range(self):
create_fake_zone("wee3.wee.mozilla.com", "")
create_fake_zone("1.2.ip6.arpa", "")
res, error = self.search("1.2.ip6.arpa")
self.assertFalse(error)
self.assertEqual(len(res['SOA']), 1)
self.assertEqual(len(res['NS']), 1)
self.assertEqual(len(res['DOMAIN']), 1)
ptr = PTR(name="host1.wee2.wee.mozilla.com", ip_str="2111:0::",
ip_type="6")
ptr.save()
res, error = self.search(ptr.ip_str)
self.assertFalse(error)
self.assertEqual(len(res['PTR']), 1)
self.assertEqual(len(res['A']), 0)
res, error = self.search("2111:0:0::")
self.assertFalse(error)
self.assertEqual(len(res['PTR']), 0)
self.assertEqual(len(res['A']), 0)
def test_integration5_ip(self):
root_domain = create_fake_zone("wee5.wee.mozilla.com", "")
create_fake_zone("10.in-addr.arpa", "")
res, error = self.search("10.in-addr.arpa OR "
"wee5.wee.mozilla.com")
self.assertFalse(error)
self.assertEqual(len(res['SOA']), 2)
self.assertEqual(len(res['NS']), 2)
self.assertEqual(len(res['DOMAIN']), 2)
ptr = PTR(name="host1.wee2.wee.mozilla.com", ip_str="10.0.0.1",
ip_type="4")
ptr.save()
addr = AddressRecord(label="host1", domain=root_domain,
ip_str="10.0.0.1", ip_type="4")
addr.save()
res, error = self.search(ptr.ip_str)
self.assertFalse(error)
self.assertEqual(len(res['PTR']), 1)
self.assertEqual(len(res['A']), 1)
res, error = self.search("10.0.0.2")
self.assertFalse(error)
self.assertEqual(len(res['PTR']), 0)
self.assertEqual(len(res['A']), 0)
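Each test above repeats the same assertFalse(error) plus per-record-type count assertions. A minimal sketch of a hypothetical helper method for SearchDNSTests that would collapse those blocks:

# Hedged sketch: hypothetical assertion helper for SearchDNSTests above.
def assert_counts(self, query, **expected):
    res, error = self.search(query)
    self.assertFalse(error)
    for rtype, count in expected.items():
        self.assertEqual(len(res[rtype]), count)

# usage inside a test:
#   self.assert_counts("wee.wee.mozilla.com", SOA=1, NS=1, DOMAIN=1)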
| avg_line_length: 38.387879 | max_line_length: 78 | alphanum_fraction: 0.576413 | (remaining per-file quality-signal columns omitted)
| hexsha: 82740c3e035310146dd60723fc67ef692db065bd | size: 172 | ext: py | lang: Python
| path: tests/test_GCC.py | repo: NVSL/fiddle (stars: 2, 2022-01-22 to 2022-01-24) / NVSL/cfiddle (issues/forks: null) | head: 5edffa92caa0894057a449ad5accb23af748e657 | licenses: ["MIT"]
from cfiddle.Toolchain.GCC import GCCToolchain
def test_available():
assert GCCToolchain.is_toolchain_available("x86") or GCCToolchain.is_toolchain_available("arm")
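A minimal sketch of a per-architecture variant of the test above. It assumes is_toolchain_available returns a boolean for any known architecture name, which the original test (checking truthiness only) does not confirm:

# Hedged sketch: per-architecture availability check for GCCToolchain.
import pytest
from cfiddle.Toolchain.GCC import GCCToolchain

@pytest.mark.parametrize("arch", ["x86", "arm"])
def test_toolchain_reports_availability(arch):
    # assumption: is_toolchain_available returns a bool rather than raising
    assert isinstance(GCCToolchain.is_toolchain_available(arch), bool)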
| avg_line_length: 34.4 | max_line_length: 99 | alphanum_fraction: 0.825581 | (remaining per-file quality-signal columns omitted)
| hexsha: 8277b58b45aad4a3dfcf831049883896af21c79e | size: 11,688 | ext: py | lang: Python
| path: morse-stf/stensorflow/ml/nn/layers/dense.py | repo: alipay/Antchain-MPC (stars: 33, 2021-11-23 to 2022-03-14, head f6916465e1da5722ca7efadc4eeaca13ec229707) / qizhi-zhang/Antchain-MPC (forks: 6, 2021-11-25 to 2022-02-23, head f551170f68b0baff328e6594484e9832230fe719) | licenses: ["Apache-2.0"]
#!/usr/bin/env python
# coding=utf-8
"""
Ant Group
Copyright (c) 2004-2020 All Rights Reserved.
------------------------------------------------------
File Name : Dense
Author : Qizhi Zhang
Email: qizhi.zqz@antgroup.com
Create Time : 2020-09-11 15:57
Description : dense (fully connected) layers for secure neural-network training
"""
from stensorflow.ml.nn.layers.layer import Layer
import numpy as np
from stensorflow.basic.basic_class.private import PrivateTensor, PrivateVariable
from stensorflow.basic.basic_class.pair import SharedVariablePair, SharedPair
from stensorflow.basic.basic_class.base import get_device
from typing import Union, List
from stensorflow.basic.protocol.bilinear_triangle import BiliinearTriangle
import pandas as pd
from stensorflow.exception.exception import StfNoneException, StfCondException
class Dense_bak(Layer):
"""
Dense Layer
"""
def __init__(self, output_dim, fathers, with_b=True):
if fathers is None:
raise StfNoneException("fathers")
if fathers == []:
raise StfCondException("fathers != []", "fathers == []")
        super(Dense_bak, self).__init__(output_dim=output_dim, fathers=fathers)
self.with_b = with_b
for father in fathers:
if not isinstance(father, Layer):
raise Exception("father must be a layer")
wi = SharedVariablePair(ownerL="L", ownerR="R", shape=[father.output_dim, output_dim])
wi.load_from_numpy(
np.random.normal(scale=1.0 / np.sqrt(father.output_dim + 1), size=[father.output_dim, output_dim]))
self.w += [wi]
if with_b:
b = SharedVariablePair(ownerL="L", ownerR="R", shape=[output_dim])
b.load_from_numpy(np.zeros([output_dim]))
self.w += [b]
def __str__(self):
return "Dense Layer of output_dim={}".format(self.output_dim)
def __repr__(self):
return self.__str__()
def func(self, w: List[SharedVariablePair], x: List[Union[PrivateTensor, SharedPair]]):
if len(w) != len(x) + 1:
raise Exception("must have len(w)==len(x)+1")
y = x[0] @ w[0]
y = y.dup_with_precision(x[0].fixedpoint)
for i in range(1, len(x)):
y = y + x[i] @ w[i]
if self.with_b:
y = y + self.w[len(x)]
return y.dup_with_precision(new_fixedpoint=x[0].fixedpoint)
def pull_back(self, w: List[SharedPair], x: List[Union[PrivateTensor, SharedPair]], y: SharedPair,
ploss_py: SharedPair) -> (List[SharedPair], List[SharedPair]):
batch_size = x[0].shape[0]
list_ploss_px = []
ploss_pw = []
for i in range(len(x)):
ploss_pxi = ploss_py @ w[i].transpose()
list_ploss_px += [ploss_pxi.dup_with_precision(x[0].fixedpoint)]
ploss_pwi = x[i].transpose() @ ploss_py
ploss_pwi = ploss_pwi.dup_with_precision(x[0].fixedpoint)
ploss_pwi = ploss_pwi / batch_size
ploss_pw += [ploss_pwi.dup_with_precision(x[0].fixedpoint)]
ploss_px = dict(zip(self.fathers, list_ploss_px))
if self.with_b:
ploss_pb = ploss_py.reduce_sum(axis=[0]) / batch_size
ploss_pw += [ploss_pb.dup_with_precision(x[0].fixedpoint)]
return ploss_pw, ploss_px
def save(self, save_file_machine, sess, path):
j = 0
for weight in self.w:
weight = weight.to_tf_tensor(owner=save_file_machine)
weight = sess.run(weight)
weight = pd.DataFrame(data=weight)
weight.to_csv(path + "_{}".format(j), header=False, index=False)
j += 1
def load(self, path):
j = 0
w = []
for weight in self.w:
assert isinstance(weight, SharedVariablePair) or isinstance(weight, PrivateVariable)
value = pd.read_csv(path + "_{}".format(j), header=None, index_col=None)
value = np.array(value)
value = np.reshape(value, weight.shape)
weight.load_from_numpy(value, const=True)
w += [weight]
j += 1
self.w = w
class Dense(Layer):
"""
Dense Layer
"""
def __init__(self, output_dim, fathers, with_b=True):
if fathers is None:
raise StfNoneException("fathers")
if fathers == []:
raise StfCondException("fathers != []", "fathers == []")
super(Dense, self).__init__(output_dim=output_dim, fathers=fathers)
self.with_b = with_b
self.bt_list = []
for father in fathers:
if not isinstance(father, Layer):
raise Exception("father must be a layer")
wi = SharedVariablePair(ownerL="L", ownerR="R", shape=[father.output_dim, output_dim])
wi.load_from_numpy(
np.random.normal(scale=1.0 / np.sqrt(father.output_dim + 1), size=[father.output_dim, output_dim]))
self.w += [wi]
f_xy = lambda a, b: a@b
f_yz = lambda b, c: b@c.transpose()
f_zx = lambda c, a: c.transpose()@a
bt = BiliinearTriangle(f_xy, f_yz, f_zx) # x, w, plosspy^T
self.bt_list.append(bt)
if with_b:
b = SharedVariablePair(ownerL="L", ownerR="R", shape=[output_dim])
b.load_from_numpy(np.zeros([output_dim]))
self.w += [b]
def __str__(self):
return "Dense Layer of output_dim={}".format(self.output_dim)
def __repr__(self):
return self.__str__()
def func(self, w: List[SharedVariablePair], x: List[Union[PrivateTensor, SharedPair]]):
if len(w) != len(x) + 1:
raise Exception("must have len(w)==len(x)+1")
# y = x[0] @ w[0]
y = self.bt_list[0].compute_u(x[0], w[0])
y = y.dup_with_precision(x[0].fixedpoint)
for i in range(1, len(x)):
# y = y + x[i] @ w[i]
y = y + self.bt_list[i].compute_u(x[i], w[i])
if self.with_b:
y = y + self.w[len(x)]
return y.dup_with_precision(new_fixedpoint=x[0].fixedpoint)
def pull_back(self, w: List[SharedPair], x: List[Union[PrivateTensor, SharedPair]], y: SharedPair,
ploss_py: SharedPair) -> (List[SharedPair], List[SharedPair]):
batch_size = x[0].shape[0]
list_ploss_px = []
ploss_pw = []
for i in range(len(x)):
# ploss_pxi = ploss_py @ w[i].transpose()
# ploss_pwi = x[i].transpose() @ ploss_py
ploss_pxi_t, ploss_pwi_t = self.bt_list[i].compute_vw(ploss_py)
ploss_pxi = ploss_pxi_t.transpose()
ploss_pwi = ploss_pwi_t.transpose()
list_ploss_px += [ploss_pxi.dup_with_precision(x[0].fixedpoint)]
ploss_pwi = ploss_pwi.dup_with_precision(x[0].fixedpoint)
ploss_pwi = ploss_pwi / batch_size
ploss_pw += [ploss_pwi.dup_with_precision(x[0].fixedpoint)]
ploss_px = dict(zip(self.fathers, list_ploss_px))
if self.with_b:
ploss_pb = ploss_py.reduce_sum(axis=[0]) / batch_size
ploss_pw += [ploss_pb.dup_with_precision(x[0].fixedpoint)]
return ploss_pw, ploss_px
def save(self, save_file_machine, sess, path):
j = 0
for weight in self.w:
weight = weight.to_tf_tensor(owner=save_file_machine)
weight = sess.run(weight)
weight = pd.DataFrame(data=weight)
weight.to_csv(path + "_{}".format(j), header=False, index=False)
j += 1
def load(self, path):
j = 0
w = []
for weight in self.w:
assert isinstance(weight, SharedVariablePair) or isinstance(weight, PrivateVariable)
value = pd.read_csv(path + "_{}".format(j), header=None, index_col=None)
value = np.array(value)
value = np.reshape(value, weight.shape)
weight.load_from_numpy(value, const=True)
w += [weight]
j += 1
self.w = w
class Dense_Local(Layer):
def __init__(self, output_dim, fathers, owner, with_b=True):
super(Dense_Local, self).__init__(output_dim=output_dim, fathers=fathers)
self.w = []
self.owner = get_device(owner)
self.with_b = with_b
for father in fathers:
if not isinstance(father, Layer):
raise Exception("father must be a layer")
wi = PrivateVariable(owner=self.owner)
# wi.from_numpy(np.random.uniform(size=[father.output_dim, output_dim],
# low=-1.0 / np.sqrt(father.output_dim + 1),
# high=1.0 / np.sqrt(father.output_dim + 1)))
wi.load_from_numpy(
np.random.normal(scale=1.0 / np.sqrt(father.output_dim + 1), size=[father.output_dim, output_dim]))
self.w += [wi]
if with_b:
b = PrivateVariable(owner=self.owner)
b.load_from_numpy(np.zeros([output_dim]))
self.w += [b]
def func(self, w: List[PrivateTensor], x: List[Union[PrivateTensor, SharedPair]]):
if (len(w) != len(x)) and (len(w) != len(x) + 1):
raise Exception("must have len(w) == len(x) or len(w)==len(x)+1")
y = PrivateTensor.from_PrivteTensorBase(x[0].to_private(owner=self.owner), op_map=x[0].op_map) @ w[0]
y = y.dup_with_precision(x[0].fixedpoint)
for i in range(1, len(x)):
xi = x[i].to_private(owner=self.owner)
y = y + xi @ w[i]
if self.with_b:
y = y + self.w[len(x)]
return y.dup_with_precision(new_fixedpoint=x[0].fixedpoint)
def pull_back(self, w: List[PrivateTensor], x: List[Union[PrivateTensor, SharedPair]], y: SharedPair,
ploss_py: SharedPair) -> (List[PrivateTensor], List[SharedPair]):
batch_size = x[0].shape[0]
list_ploss_px = []
ploss_pw = []
ploss_py = ploss_py.to_private(owner=self.owner)
ploss_py = PrivateTensor.from_PrivteTensorBase(ploss_py)
for i in range(len(x)):
ploss_pxi = ploss_py @ w[i].transpose()
list_ploss_px += [ploss_pxi.dup_with_precision(x[0].fixedpoint)]
xi = x[i].to_private(owner=self.owner)
xi = PrivateTensor.from_PrivteTensorBase(xi)
ploss_pwi = xi.transpose() @ ploss_py
ploss_pwi = ploss_pwi.dup_with_precision(x[0].fixedpoint)
ploss_pwi = ploss_pwi / batch_size
ploss_pw += [ploss_pwi.dup_with_precision(x[0].fixedpoint)]
ploss_px = dict(zip(self.fathers, list_ploss_px))
if self.with_b:
ploss_pb = ploss_py.reduce_sum(axis=[0]) / batch_size
ploss_pw += [ploss_pb.dup_with_precision(x[0].fixedpoint)]
return ploss_pw, ploss_px
def save(self, save_file_machine, sess, path):
j = 0
for weight in self.w:
weight = weight.to_tf_tensor(owner=save_file_machine)
weight = sess.run(weight)
weight = pd.DataFrame(data=weight)
weight.to_csv(path + "_{}".format(j), header=False, index=False)
j += 1
def load(self, path):
j = 0
w = []
for weight in self.w:
assert isinstance(weight, SharedVariablePair) or isinstance(weight, PrivateVariable)
value = pd.read_csv(path + "_{}".format(j), header=None, index_col=None)
value = np.array(value)
value = np.reshape(value, weight.shape)
weight.load_from_numpy(value, const=True)
w += [weight]
j += 1
self.w = w
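All three pull_back implementations above compute the same dense-layer calculus under different sharing schemes. A plain-NumPy reference sketch (cleartext, hypothetical names) of exactly those gradients:

# Hedged sketch: cleartext reference for Dense.func / Dense.pull_back above.
import numpy as np

def dense_forward(x, w, b):
    # y = x @ w + b, matching Dense.func for a single father
    return x @ w + b

def dense_backward(x, w, ploss_py):
    # gradients matching Dense.pull_back (batch-averaged for w and b)
    batch_size = x.shape[0]
    ploss_px = ploss_py @ w.T                  # ploss_pxi = ploss_py @ w[i].transpose()
    ploss_pw = (x.T @ ploss_py) / batch_size   # ploss_pwi = x[i].T @ ploss_py / batch_size
    ploss_pb = ploss_py.sum(axis=0) / batch_size
    return ploss_px, ploss_pw, ploss_pb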
| avg_line_length: 38.321311 | max_line_length: 115 | alphanum_fraction: 0.585387 | (remaining per-file quality-signal columns omitted)
| hexsha: 82b43c67c4f919f3704e599d8dddf77133777299 | size: 2,418 | ext: py | lang: Python
| path: chorus/draw/drawable.py | repo: mojaie/chorus | head: 63cbe4764ab2498b7b1da11a628bec01d03ca012 | licenses: ["MIT"] | stars: 5 (2018-03-23 to 2022-03-04) | issues: 4 (2017-09-08 to 2018-06-12) | forks: 6 (2018-01-22 to 2021-03-25)
#
# (C) 2014-2017 Seiji Matsuoka
# Licensed under the MIT License (MIT)
# http://opensource.org/licenses/MIT
#
class Drawable(object):
def draw_line(self, p1, p2, c1=(0, 0, 0), c2=None):
"""Draw line segment for bond.
Args:
p1 (x, y): the start point
p2 (x, y): the end point
c1 (R, G, B): RGB color(0-255) of p1 side half
c2 (R, G, B): RGB color(0-255) of p2 side half.
if c2 is undefined, c1 color will be used instead.
"""
raise NotImplementedError()
def draw_dashed_line(self, p1, p2, c1=(0, 0, 0), c2=None):
"""Draw dashed line segment for stereobond.
Args:
p1 (x, y): the start point
p2 (x, y): the end point
c1 (R, G, B): RGB color(0-255) of p1 side half
c2 (R, G, B): RGB color(0-255) of p2 side half.
if c2 is undefined, c1 color will be used instead.
"""
raise NotImplementedError()
def draw_wave_line(self, p1, p2, color=(0, 0, 0)):
"""Draw wave line segment for stereobond.
Args:
p1 (x, y): the start point
p2 (x, y): the end point
color (R, G, B): RGB color(0-255)
"""
raise NotImplementedError()
def draw_wedge(self, head, tail, color=(0, 0, 0)):
"""Draw wedge (isoscales triangle) for stereobond.
Args:
head (x, y): apex of the triangle
tail (x, y): midpoint of the triangle base
width (float): triangle base width
color (R, G, B): RGB color(0-255)
"""
raise NotImplementedError()
def draw_dashed_wedge(self, head, tail, color=(0, 0, 0)):
"""Draw dashed wedge (isoscales triangle) for stereobond.
Args:
head (x, y): apex of the triangle
tail (x, y): midpoint of the triangle base
width (float): triangle base width
color (R, G, B): RGB color(0-255)
"""
raise NotImplementedError()
def draw_text(self, pos, text, color=(0, 0, 0), align="center"):
"""Draw text for atom symbol.
Args:
pos (x, y): position of the text center
text (str): contents
color (R, G, B): RGB color(0-255)
align ("right", "center" or "left"): text anchor position
"""
raise NotImplementedError()
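A minimal sketch of one possible concrete backend for the interface above, assuming matplotlib as the rendering target; only draw_line is implemented, the other methods keep raising NotImplementedError as inherited:

# Hedged sketch: one possible Drawable backend using matplotlib.
import matplotlib.pyplot as plt

class MplDrawable(Drawable):
    def __init__(self):
        self.fig, self.ax = plt.subplots()

    def draw_line(self, p1, p2, c1=(0, 0, 0), c2=None):
        # matplotlib expects RGB in 0-1; the interface specifies 0-255
        if c2 is None:
            c2 = c1
        mid = ((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2)
        # draw each half in its own color, as the interface describes
        self.ax.plot([p1[0], mid[0]], [p1[1], mid[1]],
                     color=tuple(c / 255 for c in c1))
        self.ax.plot([mid[0], p2[0]], [mid[1], p2[1]],
                     color=tuple(c / 255 for c in c2))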
| avg_line_length: 32.675676 | max_line_length: 69 | alphanum_fraction: 0.533912 | (remaining per-file quality-signal columns omitted)
| hexsha: 82ca857b829fe62ac726c1920f7a0416f9d922c2 | size: 5,748 | ext: py | lang: Python
| path: skyportal/tests/api/test_instrument.py | repo: Hallflower20/skyportal | head: e6e6f288f91aa81c4c34e160940d8f54402f6365 | licenses: ["BSD-3-Clause"] | stars: 1 (2021-01-20) | issues: 151 (2020-10-15 to 2022-03-12) | forks: null
import uuid
from skyportal.tests import api
def test_token_user_post_get_instrument(super_admin_token):
name = str(uuid.uuid4())
status, data = api(
'POST',
'telescope',
data={
'name': name,
'nickname': name,
'lat': 0.0,
'lon': 0.0,
'elevation': 0.0,
'diameter': 10.0,
},
token=super_admin_token,
)
assert status == 200
assert data['status'] == 'success'
telescope_id = data['data']['id']
instrument_name = str(uuid.uuid4())
status, data = api(
'POST',
'instrument',
data={
'name': instrument_name,
'type': 'imager',
'band': 'NIR',
'filters': ['f110w'],
'telescope_id': telescope_id,
},
token=super_admin_token,
)
assert status == 200
assert data['status'] == 'success'
instrument_id = data['data']['id']
status, data = api('GET', f'instrument/{instrument_id}', token=super_admin_token)
assert status == 200
assert data['status'] == 'success'
assert data['data']['band'] == 'NIR'
def test_fetch_instrument_by_name(super_admin_token):
tel_name = str(uuid.uuid4())
status, data = api(
'POST',
'telescope',
data={
'name': tel_name,
'nickname': tel_name,
'lat': 0.0,
'lon': 0.0,
'elevation': 0.0,
'diameter': 10.0,
},
token=super_admin_token,
)
assert status == 200
assert data['status'] == 'success'
telescope_id = data['data']['id']
instrument_name = str(uuid.uuid4())
status, data = api(
'POST',
'instrument',
data={
'name': instrument_name,
'type': 'imager',
'band': 'V',
'telescope_id': telescope_id,
},
token=super_admin_token,
)
assert status == 200
assert data['status'] == 'success'
instrument_id = data['data']['id']
status, data = api(
'GET', f'instrument?name={instrument_name}', token=super_admin_token
)
assert status == 200
assert data['status'] == 'success'
assert len(data['data']) == 1
assert data['data'][0]['band'] == 'V'
assert data['data'][0]['id'] == instrument_id
assert data['data'][0]['name'] == instrument_name
def test_token_user_update_instrument(
super_admin_token, manage_sources_token, view_only_token
):
name = str(uuid.uuid4())
status, data = api(
'POST',
'telescope',
data={
'name': name,
'nickname': name,
'lat': 0.0,
'lon': 0.0,
'elevation': 0.0,
'diameter': 10.0,
},
token=super_admin_token,
)
assert status == 200
assert data['status'] == 'success'
telescope_id = data['data']['id']
instrument_name = str(uuid.uuid4())
status, data = api(
'POST',
'instrument',
data={
'name': instrument_name,
'type': 'imager',
'band': 'NIR',
'filters': ['f110w'],
'telescope_id': telescope_id,
},
token=super_admin_token,
)
assert status == 200
assert data['status'] == 'success'
instrument_id = data['data']['id']
status, data = api('GET', f'instrument/{instrument_id}', token=super_admin_token)
assert status == 200
assert data['status'] == 'success'
assert data['data']['band'] == 'NIR'
new_name = f'Gattini2_{uuid.uuid4()}'
status, data = api(
'PUT',
f'instrument/{instrument_id}',
data={
'name': new_name,
'type': 'imager',
'band': 'NIR',
'filters': ['f110w'],
'telescope_id': telescope_id,
},
token=manage_sources_token,
)
assert status == 400
assert data['status'] == 'error'
status, data = api(
'PUT',
f'instrument/{instrument_id}',
data={
'name': new_name,
'type': 'imager',
'band': 'NIR',
'filters': ['f110w'],
'telescope_id': telescope_id,
},
token=super_admin_token,
)
assert status == 200
assert data['status'] == 'success'
status, data = api('GET', f'instrument/{instrument_id}', token=view_only_token)
assert status == 200
assert data['status'] == 'success'
assert data['data']['name'] == new_name
def test_token_user_delete_instrument(super_admin_token, view_only_token):
name = str(uuid.uuid4())
status, data = api(
'POST',
'telescope',
data={
'name': name,
'nickname': name,
'lat': 0.0,
'lon': 0.0,
'elevation': 0.0,
'diameter': 10.0,
},
token=super_admin_token,
)
assert status == 200
assert data['status'] == 'success'
telescope_id = data['data']['id']
instrument_name = str(uuid.uuid4())
status, data = api(
'POST',
'instrument',
data={
'name': instrument_name,
'type': 'imager',
'band': 'NIR',
'filters': ['f110w'],
'telescope_id': telescope_id,
},
token=super_admin_token,
)
assert status == 200
assert data['status'] == 'success'
instrument_id = data['data']['id']
status, data = api('DELETE', f'instrument/{instrument_id}', token=super_admin_token)
assert status == 200
assert data['status'] == 'success'
status, data = api('GET', f'instrument/{instrument_id}', token=view_only_token)
assert status == 400
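All four tests above open with the same telescope-then-instrument POST boilerplate. A minimal sketch of a hypothetical module-level helper, reusing the module's own api and uuid imports:

# Hedged sketch: hypothetical setup helper for the tests above.
# Returns (telescope_id, instrument_id, instrument_name) using the same api() calls.
def create_telescope_and_instrument(super_admin_token):
    name = str(uuid.uuid4())
    status, data = api(
        'POST', 'telescope',
        data={'name': name, 'nickname': name, 'lat': 0.0, 'lon': 0.0,
              'elevation': 0.0, 'diameter': 10.0},
        token=super_admin_token,
    )
    assert status == 200 and data['status'] == 'success'
    telescope_id = data['data']['id']

    instrument_name = str(uuid.uuid4())
    status, data = api(
        'POST', 'instrument',
        data={'name': instrument_name, 'type': 'imager', 'band': 'NIR',
              'filters': ['f110w'], 'telescope_id': telescope_id},
        token=super_admin_token,
    )
    assert status == 200 and data['status'] == 'success'
    return telescope_id, data['data']['id'], instrument_name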
| avg_line_length: 26.488479 | max_line_length: 88 | alphanum_fraction: 0.519311 | (remaining per-file quality-signal columns omitted)
| hexsha: 82cfec4ad2b50b477f58059e6703ef816f3b3c80 | size: 8,399 | ext: py | lang: Python
| path: project/project/apps/user/tests/tests_view_ResetPassword.py | repo: RignonNoel/django-init | head: 4f00ec5f9ad8083a8dea5483c4e43712fceeba7a | licenses: ["MIT"] | stars/issues: null | forks: 1 (2019-11-20)
import json
from unittest import mock
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from django.urls import reverse
from django.test.utils import override_settings
from project.factories import UserFactory
from ..models import ActionToken
class ResetPasswordTests(APITestCase):
def setUp(self):
self.client = APIClient()
self.user = UserFactory()
self.user.set_password('Test123!')
self.user.is_active = False
self.user.save()
@override_settings(
LOCAL_SETTINGS={
"EMAIL_SERVICE": True,
"FRONTEND_INTEGRATION": {
"FORGOT_PASSWORD_URL": "fake_url",
}
}
)
def test_create_new_token(self):
"""
Ensure we can have a new token to change our password
"""
data = {
'email': self.user.email,
}
response = self.client.post(
reverse('reset_password'),
data,
format='json',
)
# The token has been created
tokens = ActionToken.objects.filter(
user=self.user,
type='password_change',
)
self.assertEqual(response.content, b'')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(len(tokens) == 1)
@override_settings(
LOCAL_SETTINGS={
"EMAIL_SERVICE": True,
"FRONTEND_INTEGRATION": {
"FORGOT_PASSWORD_URL": "fake_url",
}
}
)
def test_create_new_token_without_email_param(self):
"""
        Ensure we can't get a new token to change our password without
        giving our email in the params
"""
data = dict()
response = self.client.post(
reverse('reset_password'),
data,
format='json',
)
        # No token should have been created
tokens = ActionToken.objects.filter(
user=self.user,
type='password_change',
)
content = {
'email': ["This field is required."],
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue(len(tokens) == 0)
@override_settings(
LOCAL_SETTINGS={
"EMAIL_SERVICE": True,
"FRONTEND_INTEGRATION": {
"FORGOT_PASSWORD_URL": "fake_url",
}
}
)
def test_create_new_token_with_an_empty_email_param(self):
"""
        Ensure we can't get a new token to change our password with an
        empty email param
"""
data = {
'email': '',
}
response = self.client.post(
reverse('reset_password'),
data,
format='json',
)
        # No token should have been created
tokens = ActionToken.objects.filter(
user=self.user,
type='password_change',
)
content = {
'email': ["This field may not be blank."],
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue(len(tokens) == 0)
@override_settings(
LOCAL_SETTINGS={
"EMAIL_SERVICE": True,
"FRONTEND_INTEGRATION": {
"FORGOT_PASSWORD_URL": "fake_url",
}
}
)
def test_create_new_token_with_bad_email(self):
"""
Ensure we can't have a new token to change our password without
a valid email
"""
data = {
'email': 'test',
}
response = self.client.post(
reverse('reset_password'),
data,
format='json',
)
        # No token should have been created
tokens = ActionToken.objects.filter(
user=self.user,
type='password_change',
)
content = {'email': ['Enter a valid email address.']}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue(len(tokens) == 0)
@override_settings(
LOCAL_SETTINGS={
"EMAIL_SERVICE": True,
"FRONTEND_INTEGRATION": {
"FORGOT_PASSWORD_URL": "fake_url",
}
}
)
def test_create_new_token_with_non_existent_email(self):
"""
        Ensure we can't get a new token to change our password with an
        email that matches no account
"""
data = {
'email': 'test@test.com',
}
response = self.client.post(
reverse('reset_password'),
data,
format='json',
)
        # No token should have been created
tokens = ActionToken.objects.filter(
user=self.user,
type='password_change',
)
content = {'email': ['No account associated to this email address.']}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue(len(tokens) == 0)
@override_settings(
LOCAL_SETTINGS={
"EMAIL_SERVICE": True,
"FRONTEND_INTEGRATION": {
"FORGOT_PASSWORD_URL": "fake_url",
}
}
)
def test_create_new_token_when_token_already_exist(self):
"""
Ensure we can have a new token to change our password
"""
# We create a token before launch the test
ActionToken.objects.create(
user=self.user,
type='password_change',
)
data = {
'email': self.user.email,
}
response = self.client.post(
reverse('reset_password'),
data,
format='json',
)
# The token has been created
tokens = ActionToken.objects.filter(
user=self.user,
type='password_change',
expired=False,
)
self.assertEqual(response.content, b'')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(len(tokens) == 1)
@override_settings(
LOCAL_SETTINGS={
"EMAIL_SERVICE": False,
"FRONTEND_INTEGRATION": {
"FORGOT_PASSWORD_URL": "fake_url",
}
}
)
def test_create_new_token_without_email_service(self):
"""
        Ensure no token is created when the email service is disabled
"""
data = {
'email': self.user.email,
}
response = self.client.post(
reverse('reset_password'),
data,
format='json',
)
        # No token should have been created
tokens = ActionToken.objects.filter(
user=self.user,
type='password_change',
)
self.assertEqual(response.content, b'')
self.assertEqual(response.status_code, status.HTTP_501_NOT_IMPLEMENTED)
self.assertTrue(len(tokens) == 0)
@override_settings(
LOCAL_SETTINGS={
"EMAIL_SERVICE": True,
"FRONTEND_INTEGRATION": {
"FORGOT_PASSWORD_URL": "fake_url",
}
}
)
@mock.patch('project.services.EmailMessage.send',
return_value=0)
def test_create_new_token_with_failure_on_email_service(self, send):
"""
        Ensure the token is still created even if the email fails to send
"""
data = {
'email': self.user.email,
}
response = self.client.post(
reverse('reset_password'),
data,
format='json',
)
# The token has been created
tokens = ActionToken.objects.filter(
user=self.user,
type='password_change',
)
content = {
'detail': "Your token has been created but no email "
"has been sent. Please contact the administration.",
}
self.assertEqual(json.loads(response.content), content)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue(len(tokens) == 1)
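Every test above repeats the same @override_settings(LOCAL_SETTINGS=...) literal. A minimal sketch of the same settings hoisted into module-level constants (names are hypothetical; the dict unpacking assumes Python 3.5+):

# Hedged sketch: shared settings dicts for the repeated override_settings blocks above.
EMAIL_ON = {
    "EMAIL_SERVICE": True,
    "FRONTEND_INTEGRATION": {"FORGOT_PASSWORD_URL": "fake_url"},
}
EMAIL_OFF = {**EMAIL_ON, "EMAIL_SERVICE": False}

# usage:
#   @override_settings(LOCAL_SETTINGS=EMAIL_ON)
#   def test_create_new_token(self): ...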
| avg_line_length: 26.165109 | max_line_length: 79 | alphanum_fraction: 0.548756 | (remaining per-file quality-signal columns omitted)
| hexsha: 7d61564ab0ae3aaba1273ff2da4140666522ccce | size: 5,420 | ext: py | lang: Python
| path: lazydiff/tests/test_vector_backward.py | repo: CS207-Project-Group-7/cs207-FinalProject | head: 4bc3af5d97ac1a64045f3d10533ecf3dd018a763 | licenses: ["MIT"] | stars: 1 (2019-02-11) | issues: 5 (2018-11-05 to 2018-12-08) | forks: null
import pytest
import numpy as np
from lazydiff.vars import Var
def test_init_var():
var = Var([1, 2, 3])
assert np.all(var.val == [1, 2, 3])
def test_invalid_arg_raises_error():
var = Var([1, 2, 3])
with pytest.raises(TypeError):
var.grad('invalid value')
def test_neg():
x = Var([1, 2, 3])
y = -x
y.backward()
assert np.all(y == Var([-1, -2, -3]))
assert np.all(y.grad(x) == np.array([-1, -1, -1]))
def test_abs():
x = Var([1, 2, -3])
y = abs(x)
y.backward()
assert np.all(y == Var([1, 2, 3]))
assert np.all(y.grad(x) == np.array([1, 1, -1]))
def test_add_vars():
x1 = Var([1, 2, 3])
x2 = Var([1, 2, 3])
y = x1 + x2
y.backward()
assert np.all(y == Var([2, 4, 6]))
assert np.all(y.grad(x1) == np.array([1, 1, 1]))
assert np.all(y.grad(x2) == np.array([1, 1, 1]))
def test_add_var_number():
x = Var([1, 2, 3])
y = x + 5
y.backward()
assert np.all(y == Var([6, 7, 8]))
assert np.all(y.grad(x) == np.array([1, 1, 1]))
def test_radd_var():
x = Var([1, 2, 3])
y = 6 + x
y.backward()
assert np.all(y == Var([7, 8, 9]))
assert np.all(y.grad(x) == np.array([1, 1, 1]))
def test_add_var_non_number():
with pytest.raises(TypeError):
Var([1, 2, 3]) + 'string'
def test_sub_vars():
x1 = Var([1, 1, 1])
x2 = Var([1, 2, 3])
y = x1 - x2
y.backward()
assert np.all(y == Var([0, -1, -2]))
assert np.all(y.grad(x1) == np.array([1, 1, 1]))
assert np.all(y.grad(x2) == np.array([-1, -1, -1]))
def test_sub_var_number():
x = Var([1, 2, 3])
y = x - 5
y.backward()
assert np.all(y == Var([-4, -3, -2]))
assert np.all(y.grad(x) == np.array([1, 1, 1]))
def test_rsub_var():
x = Var([1, 2, 3])
y = 6 - x
y.backward()
assert np.all(y == Var([5, 4, 3]))
assert np.all(y.grad(x) == np.array([-1, -1, -1]))
def test_sub_var_non_number():
with pytest.raises(TypeError):
Var([1, 2, 3]) - 'string'
def test_mul_vars():
x1 = Var(8)
x2 = Var([2, 2, 2])
y = x1 * x2
y.backward()
assert np.all(y == Var([16, 16, 16]))
assert np.all(y.grad(x1) == np.array([2, 2, 2]))
assert np.all(y.grad(x2) == np.array([8, 8, 8]))
def test_mul_var_number():
x = Var([3, 2, 1])
y = x * 5
y.backward()
assert np.all(y == Var([15, 10, 5]))
assert np.all(y.grad(x) == np.array([5, 5, 5]))
def test_rmul_vars():
x = Var([3, 3, 3])
y = 6 * x
y.backward()
assert np.all(y == Var([18, 18, 18]))
assert np.all(y.grad(x) == np.array([6, 6, 6]))
def test_mul_var_non_number():
with pytest.raises(TypeError):
Var([1, 2, 3]) * 'string'
def test_div_vars():
x = Var([8, 1])
x2 = Var([2, 1])
y = x / x2
y.backward()
assert np.all(y == Var([4, 1]))
assert np.all(y.grad(x) == np.array([0.5, 1]))
assert np.all(y.grad(x2) == np.array([-2., -1]))
def test_div_var_number():
x = Var([10, 10])
y = x / 5
y.backward()
assert np.all(y == Var([2, 2]))
assert np.all(y.grad(x) == np.array([0.2, .2]))
def test_div_var_fails_with_divide_by_zero():
with pytest.raises((ZeroDivisionError, FloatingPointError)):
Var([1, 2, 3]) / Var(0.)
def test_div_var_number_fails_with_divide_by_zero():
with pytest.raises((ZeroDivisionError, FloatingPointError)):
Var([1, 2, 3]) / 0.
def test_rdiv_vars():
x = Var([3, 3, 3])
y = 6 / x
y.backward()
assert np.all(y == Var([2, 2, 2]))
assert np.all(y.grad(x) == np.array([-2/3, -2/3, -2/3]))
def test_rdiv_var_fails_with_divide_by_zero():
with pytest.raises((ZeroDivisionError, FloatingPointError)):
1 / Var([0, 2, 3])
def test_div_var_non_number():
with pytest.raises(TypeError):
Var([1, 2, 3]) / 'string'
def test_pow_vars():
x = Var(np.e)
x2 = Var([1, 2, 3])
y = x ** x2
y.backward()
assert np.all(y == Var([np.e**1, np.e**2, np.e**3]))
assert np.all(y.grad(x) == np.array([1, 2*np.e, 3*np.e**2]))
assert np.all(y.grad(x2) == np.array([np.e, np.e**2, np.e**3]))
def test_pow_var_number():
x = Var([1, 2, 3])
y = x ** 5
y.backward()
assert np.all(y == Var([1, 32, 243]))
assert np.all(y.grad(x) == np.array([5, 80, 405]))
def test_rpow_vars():
x = Var([1, 2, 3])
y = np.e ** x
y.backward()
assert np.all(y == Var([np.e, np.e**2, np.e**3]))
assert np.all(y.grad(x) == np.array([np.e, np.e**2, np.e**3]))
def test_pow_var_non_number():
x = Var([1, 2, 3])
with pytest.raises(TypeError):
x **= 'string'
def test_iadd_banned():
x = Var([1, 2, 3])
with pytest.raises(TypeError):
x += 3
def test_isub_banned():
x = Var([1, 2, 3])
with pytest.raises(TypeError):
x -= 3
def test_imul_banned():
x = Var([1, 2, 3])
with pytest.raises(TypeError):
x *= 3
def test_idiv_banned():
x = Var([1, 2, 3])
with pytest.raises(TypeError):
x /= 3
def test_ipow_banned():
x = Var([1, 2, 3])
with pytest.raises(TypeError):
x **= 3
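
# The five "banned" tests above imply that Var overrides the in-place
# operators to raise; a hedged sketch of one such override (editor's
# illustration, not lazydiff's actual code):
#
#     def __iadd__(self, other):
#         raise TypeError("in-place operations are not supported for Var")
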
def test_composite1():
x1 = Var(-3)
x2 = Var([5, 10])
x3 = Var([10, 100])
y = abs(x1) / x2 * x3
y.backward()
assert np.all(y.grad(x1) == [-2, -10])
def test_composite2():
x1 = Var(-3)
x2 = Var([5, 10])
x3 = Var([1, 1])
y = x2**x1 / x3
y.backward()
assert np.all(y.grad(x3) == [-.008, -.001])
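
# --- Editor's sketch --------------------------------------------------------
# A minimal reverse-mode autodiff Var in the spirit of the interface these
# tests exercise. This is a hedged illustration under assumptions, not
# lazydiff's implementation: only neg/add/mul are shown, backward() pushes
# adjoints by recursion (a real implementation would use a topological pass),
# and grad() just reads the accumulated adjoint of an input variable.
import numpy as np

class TinyVar:
    def __init__(self, val, parents=()):
        self.val = np.asarray(val, dtype=float)
        self.parents = parents                    # (parent, local_grad) pairs
        self.adjoint = np.zeros_like(self.val)

    def __neg__(self):
        return TinyVar(-self.val, parents=((self, -np.ones_like(self.val)),))

    def __add__(self, other):
        other = other if isinstance(other, TinyVar) else TinyVar(other)
        return TinyVar(self.val + other.val,
                       parents=((self, np.ones_like(self.val)),
                                (other, np.ones_like(other.val))))

    def __mul__(self, other):
        other = other if isinstance(other, TinyVar) else TinyVar(other)
        return TinyVar(self.val * other.val,
                       parents=((self, other.val), (other, self.val)))

    def backward(self, seed=None):
        # Accumulate the incoming adjoint, then hand each parent its share.
        if seed is None:
            seed = np.ones_like(self.val)
        self.adjoint = self.adjoint + seed
        for parent, local_grad in self.parents:
            parent.backward(local_grad * seed)

    def grad(self, var):
        return var.adjoint

# Mirrors test_mul_var_number above: d(5x)/dx == 5 elementwise.
_x = TinyVar([3.0, 2.0, 1.0])
_y = _x * 5
_y.backward()
assert np.all(_y.grad(_x) == np.array([5.0, 5.0, 5.0]))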

avg_line_length: 25.092593 | max_line_length: 67 | alphanum_fraction: 0.528413

hexsha: 7db737a04f629045a83ebd9adf8886259154cabf | size: 42898 | ext: py | lang: Python
path: PU_Bayesian_classifiers/PU_Bayesian_classifiers.py | repo: chengning-zhang/Bayesian-Classifers-for-PU_learning | head: 5ec5a790364f2bcb524acec002753ba86cb61541 | licenses: ["MIT"]
stars: 4 (2020-03-01T08:27:52.000Z to 2021-06-16T17:17:19.000Z) | issues: 1 (2020-03-02T04:43:36.000Z to 2020-03-02T04:43:36.000Z) | forks: null

# from google.colab import drive
# drive.mount('/content/drive')
# !pip install shap
# !pip install pyitlib
# import os
# os.path.abspath(os.getcwd())
# os.chdir('/content/drive/My Drive/Protein project')
# os.path.abspath(os.getcwd())
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
Created on Mar 1, 2020
@author: Chengning Zhang
"""
from __future__ import division  # must precede other imports; enables float division
import warnings
warnings.filterwarnings("ignore")
from collections import Counter
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score # tp / (tp + fn)
from sklearn.metrics import precision_score # tp / (tp + fp)
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.model_selection import KFold, StratifiedKFold
#from pyitlib import discrete_random_variable as drv
import time
import timeit
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted ### Checks if the estimator is fitted by verifying the presence of fitted attributes (ending with a trailing underscore)
#from sklearn.utils.multiclass import unique_labels, not necessary, can be replaced by array(list(set()))
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression  # default model_class for WNB/WTAN below
class Bayes_net_PU(BaseEstimator, ClassifierMixin):
"""
    Bayesian network implementation for Positive-Unlabeled examples.
    API inspired by scikit-learn.
"""
    def predict_proba(self, X):  # core prediction method; every other prediction method calls it first
raise NotImplementedError
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : ndarray of shape (n_samples,1)
Predicted target values for X
"""
Prob_1 = self.predict_proba(X)
return(np.where(Prob_1 > 0.5, '1', '0'))
def Conditional_log_likelihood(self,y_true,y_pred_prob):
"""Calculate the conditional log likelihood.
:param y_true: The true class labels. e.g ['1','1',.....'0','0']
:param y_pred_prob: np.array shows prob of class '1' for each instance.
:return: CLL. A scalar.
"""
cll = []
for i in range(len(y_pred_prob)):
cll.append(y_pred_prob[i] if y_true[i] == '1' else 1-y_pred_prob[i] )
cll = [np.log2(ele) for ele in cll]
cll = np.array(cll)
return(sum(cll))
def plot_tree_structure(self,mapping = None,figsize = (5,5)):
check_is_fitted(self)
parent = self.parent_
egdes = [(k,v) for v,k in parent.items() if k is not None]
G = nx.MultiDiGraph()
G.add_edges_from(egdes)
#mapping=dict(zip(range(8),['b0','b1','b2','b3','b4','b5','b6','b7']))
plt.figure(figsize=figsize)
nx.draw_networkx(G,nx.shell_layout(G))
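
# Editor's aside: a hedged, vectorized equivalent of the conditional
# log-likelihood above, assuming labels follow the same '0'/'1' string
# convention the method documents.
def conditional_log_likelihood_vectorized(y_true, y_pred_prob):
    y_pred_prob = np.asarray(y_pred_prob, dtype=float)
    is_pos = np.asarray(y_true) == '1'
    # log2 p for positives, log2 (1 - p) for negatives, summed over instances
    return np.sum(np.log2(np.where(is_pos, y_pred_prob, 1.0 - y_pred_prob)))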
class PNB(Bayes_net_PU):
name = "PNB"
def __init__(self, alpha = 1):
self.alpha = alpha
def fit(self,X_L, X_u, pri, M = None, case_control = True):
""" Implementation of a fitting function.
Parameters
----------
X_l : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input positive labeled samples.
X_u : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input unlabeled samples.
pri : scalar.
The prevalence probability (p(y = 1))
        M : None
            Kept for API consistency with the tree-based models; ignored here.
case_control : Bool
Case control scenario or single-training data scenario
Returns
-------
self : object
Returns self.
"""
X_L = check_array(X_L)
X_u = check_array(X_u)
if X_L.shape[1] != X_u.shape[1]:
raise ValueError('labeled data and unlabeled data have different number of features ')
# 1: Learned from positive examples, P(xij|1) = N_L(xij)/N_L. N_L(xij), same for both scenario
# 2: Learned from Unlabeled examples, N_U(xij) or from U+L N_(U+L)(xij)
# 3: P(xi = j|c = 0), Listprob0, calculated from previous list
n_L,p = X_L.shape
# n_u,p = X_u.shape
if case_control:
X_U_or_UL = X_u
else:
X_U_or_UL = np.concatenate((X_L,X_u),axis = 0)
#
n_U_or_UT = X_U_or_UL.shape[0]
List_count_1 = {}
List_prob_1 = {} # {x0:{'1': p(x0 =1|y=1), '2': p(x0 =2|y=1), 'else': }, x1:{}, ... x7:{} }
#
List_count_U_or_UL = {}
#
List_prob_0 = {} # P(xi = j|c=0)
        K = {}  # X_i_L and X_i_u together contain all possible values of x_i; unlike the supervised setting, no other values can appear
for i in range(p):
x_i_L = X_L[:,i]
x_i_U_or_UL = X_U_or_UL[:,i]
            x_i_L_counter = Counter(x_i_L)  # Counter returns 0 for missing keys, so no KeyError handling is needed
x_i_U_or_UL_counter = Counter(x_i_U_or_UL)
x_i_values = set(x_i_L_counter.keys()).union(x_i_U_or_UL_counter.keys()) # all possible values of x_i
K[i] = len(list(x_i_values))
# part 1
x_i_L_prob = {key: (value + self.alpha) / (n_L + self.alpha * (K[i]) ) for key,value in x_i_L_counter.items()} # p(x|s=1) = p(x|y=1)
x_i_L_prob.update({key: (0 + self.alpha) / (n_L + self.alpha * (K[i]) ) for key in list(x_i_values) if key not in list(x_i_L_counter.keys()) } )
List_prob_1[i] = x_i_L_prob
List_count_1[i] = x_i_L_counter
# part 2
List_count_U_or_UL[i] = x_i_U_or_UL_counter
# part 3
            x_i_0_prob = {key: max([0,x_i_U_or_UL_counter[key] - x_i_L_prob[key] * pri * n_U_or_UT]) for key in list(x_i_values)} # the numerator can be negative, so clamp it to >= 0
            x_i_0_prob = {key:(self.alpha + value)/ (K[i]*self.alpha + n_U_or_UT * (1-pri) ) for key,value in x_i_0_prob.items()} # add pseudo-counts and divide by the denominator
            x_i_0_prob = {key: value/(sum(np.array(list(x_i_0_prob.values())))) for key,value in x_i_0_prob.items() } # renormalize; floating-point error keeps the raw values from summing exactly to 1
List_prob_0[i] = x_i_0_prob
# x_i_0_prob = {key: value/sum(np.array(list(x_i_0_prob.values()))) for key,value in x_i_0_prob.items() }
self.case_control_ = case_control
self.is_fitted_ = True
self.n_features_, self.K_, self.List_count_1_,self.List_prob_1_, self.List_count_U_or_UL_, self.List_prob_0_, self.prevalence_ = p, K, List_count_1,List_prob_1,List_count_U_or_UL,List_prob_0, pri
return self
def predict_proba(self,X):
"""
Return probability estimates for the test vector X. Usually it would be X_unlabeled
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
P(y=1|x) : array-like of shape (n_samples, )
Returns the probability of the samples for positive class in
the model.
"""
check_is_fitted(self)
X = check_array(X)
Prob_1 = []
for ins in X:
P1 = self.prevalence_ # don't need copy, immutable
P0 = 1 - P1
for i in range(self.n_features_):
P1 = P1 * (self.List_prob_1_[i][ins[i]])
P0 = P0 * (self.List_prob_0_[i][ins[i]])
# normalize proba
P = P1 + P0
P1 = P1/P; P0 = P0/P
Prob_1.append(P1)
Prob_1 = np.array(Prob_1) # for shap
return Prob_1
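
# A hedged usage sketch for PNB on toy data; the values below are invented
# for illustration (digit-string categories survive check_array's numeric
# conversion) and pri is an assumed prevalence.
def _demo_pnb():
    rng = np.random.RandomState(0)
    X_L = rng.choice(['1', '2'], size=(20, 3))   # positive labeled sample
    X_u = rng.choice(['1', '2'], size=(80, 3))   # unlabeled sample
    clf = PNB(alpha=1).fit(X_L, X_u, pri=0.3, case_control=True)
    return clf.predict_proba(X_u[:5])            # P(y=1 | x) for five rows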
class PTAN(Bayes_net_PU):
name = "PTAN"
def __init__(self, alpha = 1,starting_node = 0):
self.starting_node = starting_node
self.alpha = alpha
def get_mutual_inf(self,X_L, X_u, pri, case_control):
"""get PU conditional mutual inf of all pairs of features, part of training
Parameters
----------
X_l : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input positive labeled samples.
X_u : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input unlabeled samples.
pri : scalar.
The prevalence probability (p(y = 1))
case_control : Bool
Case control scenario or single-training data scenario
        Returns
        -------
        (n_L, p, n_U_or_UL, M, K, X_values), where M is the np.array matrix of
        pairwise conditional mutual information.
"""
X_L = check_array(X_L)
X_u = check_array(X_u)
if X_L.shape[1] != X_u.shape[1]:
raise ValueError('labeled data and unlabeled data have different number of features ')
#
n_L,p = X_L.shape
# n_u,p = X_u.shape
if case_control:
X_U_or_UL = X_u
else:
X_U_or_UL = np.concatenate((X_L,X_u),axis = 0)
#
n_U_or_UL = X_U_or_UL.shape[0]
M = np.zeros((p,p)) # will not change global M, since new memory assigned for this local M
# part 1: proba that can be estimated from labeled examples. 1 P(xij,xkl|1), 2 p(xj|1), 3 p(xkl|1). P(xij,xkl|1) = N_L(xi=j,xk=l)/N_L
# part 2: P(xij,xkl) from U, P(xij,xkl) = N_U(xij,xkl) / n_U, or P(xij,xkl) from L+U, P(xij,xkl) = N_(L+U)(xij,xkl) / N_(L+U)
# part 3: p(xij,xkl|0),p(xij|0),p(xkl|0), same as PNB, from previous list
#
# List_prob_xi_xj_1 = {} # p(xij,xkl|c =1) = N_L(xij,xkl) / N_L and p(xij|c =1) = N_L(xij)/N_L
# List_count_xi_xj_1 = {} # N_L(xij,xkl) and N_L(xij)
#
# List_prob_xi_xj_U = {} # P(xij,xkl) = N_U(xij,xkl)/n_u
# List_count_xi_xj_U = {} # N_U(xij,xkl) and N_U(xij)
#
# List_prob_xi_xj_0 = {} # p(xij,xkl|0),and p(xij|0) obtained from previous lists
K = {}
X_values = {}
for i in range(p):
x_i_L = X_L[:,i]
x_i_U_or_UL = X_U_or_UL[:,i]
            x_i_L_counter = Counter(x_i_L)  # Counter returns 0 for missing keys, so no KeyError handling is needed
x_i_U_or_UL_counter = Counter(x_i_U_or_UL) # N_U(xi = j) or N_(L+U)(xi =j)
x_i_values = list(set(x_i_L_counter.keys()).union(x_i_U_or_UL_counter.keys()))
K_i = len(list(x_i_values))
K[i] = K_i
X_values[i] = x_i_values
# part 1, p(xij|1) and N_L(xi = j)
x_i_L_prob = {key: (value + self.alpha) / (n_L + self.alpha * (K[i]) ) for key,value in x_i_L_counter.items()} # p(xi= j|s=1) = p(x|y=1)
x_i_L_prob.update({key: (0 + self.alpha) / (n_L + self.alpha * (K[i]) ) for key in x_i_values if key not in list(x_i_L_counter.keys()) } )
# List_prob_xi_xj_1[(i,i)] = x_i_L_prob
# List_count_xi_xj_1[(i,i)] = x_i_L_counter
# part 2, learn from U, N_U(xij) ,N_U(xij,xkl) or L+U, N_(L+U)(xij), N_(L+U)(xij,xkl)
xi_prob_U_or_UL = {key: (self.alpha + value) / (K_i*self.alpha + n_U_or_UL) for key,value in x_i_U_or_UL_counter.items()} # P(xij)
# List_prob_xi_xj_U[(i,i)] = xi_prob_U
# List_count_xi_xj_U[(i,i)] = x_i_u_counter
# part 3, p(xi =j | y=0)
            x_i_0_prob = {key: max([0,x_i_U_or_UL_counter[key] - x_i_L_prob[key] * pri * n_U_or_UL]) for key in x_i_values} # N_U(xi =j) - N_u*p(xij, y =1) = N_U(xij,y=0); the numerator can be negative, so clamp it to >= 0
            x_i_0_prob = {key:(self.alpha + value)/ (K[i]*self.alpha + n_U_or_UL * (1-pri) ) for key,value in x_i_0_prob.items()} # add pseudo-counts and divide by the denominator
            x_i_0_prob = {key: value/(sum(np.array(list(x_i_0_prob.values())))) for key,value in x_i_0_prob.items() } # renormalize; floating-point error keeps the raw values from summing exactly to 1
# List_prob_xi_xj_0[(i,i)] = x_i_0_prob
for j in range(i+1,p):
x_j_L = X_L[:,j]
x_j_U_or_UL = X_U_or_UL[:,j]
                x_j_L_counter = Counter(x_j_L)  # Counter returns 0 for missing keys, so no KeyError handling is needed
x_j_U_or_UL_counter = Counter(x_j_U_or_UL)
x_j_values = list(set(x_j_L_counter.keys()).union(x_j_U_or_UL_counter.keys()))
K_j = len(list(x_j_values))
x_j_L_prob = {key: (value + self.alpha) / (n_L + self.alpha * (K_j) ) for key,value in x_j_L_counter.items()} # p(xj= sth|s=1) = p(x|y=1)
x_j_L_prob.update({key: (0 + self.alpha) / (n_L + self.alpha * (K_j) ) for key in x_j_values if key not in list(x_j_L_counter.keys()) } )
# part 3, p(xi =j | y=0)
                x_j_0_prob = {key: max([0,x_j_U_or_UL_counter[key] - x_j_L_prob[key] * pri * n_U_or_UL]) for key in x_j_values} # the numerator can be negative, so clamp it to >= 0
                x_j_0_prob = {key:(self.alpha + value)/ (K_j*self.alpha + n_U_or_UL * (1-pri) ) for key,value in x_j_0_prob.items()} # add pseudo-counts and divide by the denominator
                x_j_0_prob = {key: value/(sum(np.array(list(x_j_0_prob.values())))) for key,value in x_j_0_prob.items() } # renormalize; floating-point error keeps the raw values from summing exactly to 1
# part 1 P(xij,xkl|1) = N_L(xi=j,xk=l)/N_L and N_L(xi=j,xk=l)
xi_xj_count_1 = {(v1,v2): X_L[(X_L[:,i] == v1) & (X_L[:,j] == v2) ].shape[0] for v1 in x_i_values for v2 in x_j_values} # N_L(xi = j, xk = l)
xi_xj_prob_1 = {key: (self.alpha + value) / (K_i*K_j*self.alpha + n_L) for key,value in xi_xj_count_1.items()} # p(xij,xkl|1)
# List_prob_xi_xj_1[(i,j)] = xi_xj_prob_1
# List_count_xi_xj_1[(i,j)] = xi_xj_count_1
# part 2, learn from U, N_U(xij,xkl), or L+U
xi_xj_count_U_or_UL = {(v1,v2): X_U_or_UL[(X_U_or_UL[:,i] == v1) & (X_U_or_UL[:,j] == v2) ].shape[0] for v1 in x_i_values for v2 in x_j_values} # N_U(xi = j, xk = l)
xi_xj_prob_U_or_UL = {key: (self.alpha + value) / (K_i*K_j*self.alpha + n_U_or_UL) for key,value in xi_xj_count_U_or_UL.items()} # P(xij,xkl)
# List_prob_xi_xj_U[(i,j)] = xi_xj_prob_U
# List_count_xi_xj_U[(i,j)] = xi_xj_count_U
# part 3, p(xi = j,xk =l |0)
                xi_xj_prob_0 = {(v1,v2): max([0, xi_xj_count_U_or_UL[(v1,v2)] - xi_xj_prob_1[(v1,v2)] * pri * n_U_or_UL ]) for v1 in x_i_values for v2 in x_j_values} # the numerator can be negative, so clamp it to >= 0
                xi_xj_prob_0 = {key: (self.alpha + value)/ (K_j*K_i*self.alpha + n_U_or_UL * (1-pri) ) for key,value in xi_xj_prob_0.items()} # add pseudo-counts and divide by the denominator
                xi_xj_prob_0 = {key: value/(sum(np.array(list(xi_xj_prob_0.values())))) for key,value in xi_xj_prob_0.items() } # renormalize; floating-point error keeps the raw values from summing exactly to 1
# List_prob_xi_xj_0[(i,j)] = xi_xj_prob_0
                # M[i,j]: PU estimate of the conditional mutual information between
                # x_i and x_j, split into a labeled (y=1) term weighted by the
                # prevalence and an unlabeled/negative (y=0) term.
M[i,j] = sum( np.array([pri* xi_xj_prob_1[(v1,v2)]* np.log( xi_xj_prob_1[(v1,v2)]/(x_i_L_prob[v1]* x_j_L_prob[v2]) ) +
(xi_xj_prob_U_or_UL[(v1,v2)] - pri* xi_xj_prob_1[(v1,v2)] )* np.log(xi_xj_prob_0[(v1,v2)] / ( x_i_0_prob[v1]*x_j_0_prob[v2] ) )
for v1 in x_i_values for v2 in x_j_values] ) )
M[j,i] = M[i,j]
# for bug, x1, x3
# if i == 1 and j == 3:
# part1 = [pri* xi_xj_prob_1[(v1,v2)]* np.log( xi_xj_prob_1[(v1,v2)]/(x_i_L_prob[v1]* x_j_L_prob[v2]) )
# for v1 in x_i_values for v2 in x_j_values]
# part2 = [(xi_xj_prob_U[(v1,v2)] - pri* xi_xj_prob_1[(v1,v2)] )* np.log(xi_xj_prob_0[(v1,v2)] / ( x_i_0_prob[v1]*x_j_0_prob[v2] ) )
# for v1 in x_i_values for v2 in x_j_values]
# self.n_L_,self.n_features_, self.n_U_, self.M_,self.List_prob_xi_xj_1_, self.List_count_xi_xj_1_,self.List_prob_xi_xj_U_,self.List_count_xi_xj_U_,self.List_prob_xi_xj_0_,self.K_,self.prior_ = n_L,p,n_u,M,List_prob_xi_xj_1,List_count_xi_xj_1,List_prob_xi_xj_U,List_count_xi_xj_U,List_prob_xi_xj_0,K,pri
# self.part1,self.part2 = part1,part2
# return n_L,p,n_u,M,List_prob_xi_xj_1,List_count_xi_xj_1,List_prob_xi_xj_U,List_count_xi_xj_U,List_prob_xi_xj_0,K,pri
return n_L,p,n_U_or_UL,M,K,X_values
def Findparent(self,X_L, X_u, pri, case_control):
# n_L,p,n_u,M,List_prob_xi_xj_1,List_count_xi_xj_1,List_prob_xi_xj_U,List_count_xi_xj_U,List_prob_xi_xj_0,K,pri = self.get_mutual_inf(X_L, X_u, pri)
n_L,p,n_U_or_UL,M,K,x_values = self.get_mutual_inf(X_L, X_u, pri, case_control)
np.fill_diagonal(M,0)
V = range(p) # set of all nodes
st = self.starting_node
        Vnew = [st]  # vertices that have already found their parent; initialize with the starting node (TAN picks one arbitrarily)
        parent = {st:None}  # dict recording each node's parent
        while set(Vnew) != set(V):  # while there are still nodes whose parents are unknown
            index_i = []  # after the loop, same length as Vnew: the closest node outside Vnew for each node in Vnew
            max_inf = []  # the corresponding mutual information
            for i in range(len(Vnew)):  # could be parallelized
                vnew = Vnew[i]
                ListToSorted = [e for e in M[:,vnew]]
                index = sorted(range(len(ListToSorted)),key = lambda k: ListToSorted[k],reverse = True)
                index_i.append([ele for ele in index if ele not in Vnew][0])
                max_inf.append(M[index_i[-1],vnew])
            index1 = sorted(range(len(max_inf)),key = lambda k: max_inf[k],reverse = True)[0]  # position of the strongest new edge within Vnew/index_i/max_inf
            Vnew.append(index_i[index1])  # add that node to the tree
            parent[index_i[index1]] = Vnew[index1]  # add direction: the newly added node must be the child, otherwise some node would have two parents
#return parent,n_L,p,n_u,M,List_prob_xi_xj_1,List_count_xi_xj_1,List_prob_xi_xj_U,List_count_xi_xj_U,List_prob_xi_xj_0,K,pri
return parent,n_L,p,n_U_or_UL,M,K,x_values
    def fit(self,X_L, X_u, pri, M = None, case_control = True):  # fitted on the training data
"""Implementation of fitting, part of training
Parameters
----------
X_l : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input positive labeled samples.
X_u : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input unlabeled samples.
pri : scalar.
The prevalence probability (p(y = 1))
        M : None
            Kept for API consistency; it is ignored.
        case_control : Bool
            Case-control scenario or single-training-data scenario
        Returns
        -------
        self
"""
parent,n_L,p,n_U_or_UL,M,K,x_values = self.Findparent(X_L, X_u, pri, case_control)
if case_control:
X_U_or_UL = X_u
else:
X_U_or_UL = np.concatenate((X_L,X_u),axis = 0)
# part 1: proba that can be estimated from labeled examples. 1 P(xij|1,xkl), 2 p(x_root|1) = N_L(x_root)/N_L, P(xij|1,xkl) = N_L(xi=j,xk=l)/N_L(xkl)
# part 2: learn from U, N_U(xij,xkl), and N_U(xkl)
# part 3: p(xij|0,xkl),p(x_root|0) from previous list
#
List_prob_1 = {} # 1 P(xij|1,xkl), 2 p(x_root|1)
List_count_1 = {} # N_L(xij,xpal) and N_L(xij)
#
List_count_U_or_UL = {} # N_U(xij,xkl) and N_U(xij)
#
List_prob_0 = {} # p(xij|0,xkl),p(x_root|0)
# for root node
root_i = self.starting_node
x_i_values = x_values[root_i]
# part 1
x_i_L = X_L[:,root_i]
x_i_L_counter = Counter(x_i_L)
x_i_L_prob = {key: (x_i_L_counter[key]+self.alpha)/(K[root_i]*self.alpha + n_L ) for key in x_i_values}
List_prob_1[root_i] = x_i_L_prob
List_count_1[root_i] = x_i_L_counter
# part 2
x_i_U_or_UL = X_U_or_UL[:,root_i]
x_i_U_or_UL_counter = Counter(x_i_U_or_UL)
List_count_U_or_UL[root_i] = x_i_U_or_UL_counter
# part 3
        x_i_0_prob = {key: max([0,x_i_U_or_UL_counter[key] - x_i_L_prob[key] * pri * n_U_or_UL]) for key in x_i_values} # N_U(xi =j) - N_u*p(xij, y =1) = N_U(xij,y=0); the numerator can be negative, so clamp it to >= 0
        x_i_0_prob = {key:(self.alpha + value)/ (K[root_i]*self.alpha + n_U_or_UL * (1-pri) ) for key,value in x_i_0_prob.items()} # add pseudo-counts and divide by the denominator
        x_i_0_prob = {key: value/(sum(np.array(list(x_i_0_prob.values())))) for key,value in x_i_0_prob.items() } # renormalize; floating-point error keeps the raw values from summing exactly to 1
List_prob_0[root_i] = x_i_0_prob
#
for i in [e for e in range(0,p) if e != root_i]:
x_i_values = x_values[i]
x_i_parent_Value = x_values[parent[i]]
# part 1, P(xij|1,xkl) = N_L(xi=j,xk=l)/N_L(xkl)
List_count_1[i] = {v2: {v1:X_L[(X_L[:,i] == v1) & (X_L[:,parent[i]] == v2)].shape[0] for v1 in x_i_values} for v2 in x_i_parent_Value} # {pva1: {'1': , '2':, '3': }, pval2:{}}
List_prob_1[i] = {v2: {v1:(X_L[(X_L[:,i] == v1) & (X_L[:,parent[i]] == v2)].shape[0] + self.alpha)/ (X_L[(X_L[:,parent[i]] == v2)].shape[0] + self.alpha*K[i]) for v1 in x_i_values} for v2 in x_i_parent_Value}
# part 2
List_count_U_or_UL[i] = {v2: {v1:X_U_or_UL[(X_U_or_UL[:,i] == v1) & (X_U_or_UL[:,parent[i]] == v2)].shape[0] for v1 in x_i_values} for v2 in x_i_parent_Value}
# part 3
x_i_0_prob = {v2: {v1: List_count_U_or_UL[i][v2][v1] - List_prob_1[i][v2][v1]*pri* sum(list(List_count_U_or_UL[i][v2].values())) for v1 in x_i_values} for v2 in x_i_parent_Value}
x_i_0_prob = {v2: {v1: max([0,x_i_0_prob[v2][v1] ]) for v1 in x_i_values} for v2 in x_i_parent_Value}
x_i_0_prob = {v2: {v1:(x_i_0_prob[v2][v1] + self.alpha)/(self.alpha*K[i] + (1-pri)*sum(list(List_count_U_or_UL[i][v2].values())) ) for v1 in x_i_values} for v2 in x_i_parent_Value}
x_i_0_prob = {v2: {v1:x_i_0_prob[v2][v1]/sum(list(x_i_0_prob[v2].values())) for v1 in x_i_values} for v2 in x_i_parent_Value} # normalize
List_prob_0[i] = x_i_0_prob
self.case_control_ = case_control
self.is_fitted_ = True
self.parent_, self.conditional_MI_ = parent, M
self.n_features_, self.K_, self.List_count_1_,self.List_prob_1_, self.List_count_U_or_UL_, self.List_prob_0_, self.prevalence_ = p, K, List_count_1,List_prob_1,List_count_U_or_UL,List_prob_0, pri
return self
def predict_proba(self,X):
check_is_fitted(self)
X = check_array(X)
Prob_1 = []
root_i = self.starting_node
for ins in X:
P1 = self.prevalence_
P0 = 1 - P1
# root_i
P1 = P1 * (self.List_prob_1_[root_i][ins[root_i]])
P0 = P0 * (self.List_prob_0_[root_i][ins[root_i]])
for i in [e for e in range(0,self.n_features_) if e != root_i]:
pValue = ins[self.parent_[i]]
P1 = P1 * (self.List_prob_1_[i][pValue][ins[i]])
P0 = P0 * (self.List_prob_0_[i][pValue][ins[i]])
P = P1 + P0
P1 = P1/P; P0 = P0/P
Prob_1.append(P1)
#
Prob_1 = np.array(Prob_1)
return Prob_1
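
# Findparent above is a Prim-style greedy search for a maximum spanning tree
# over the mutual-information matrix. A compact equivalent sketch (editor's
# illustration of the same greedy rule, not the class's code):
def max_spanning_tree_parents(M, start=0):
    M = np.asarray(M, dtype=float)
    p = M.shape[0]
    parent, in_tree = {start: None}, {start}
    while len(in_tree) < p:
        # pick the heaviest edge joining the tree to a node outside it
        _, i, j = max((M[j, i], i, j) for i in in_tree
                      for j in range(p) if j not in in_tree)
        parent[j] = i          # the new node enters as the child of i
        in_tree.add(j)
    return parent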
class PSTAN(Bayes_net_PU):
name = "PSTAN"
def __init__(self, alpha = 1,starting_node = 0):
self.starting_node = starting_node
self.alpha = alpha
def Findparent(self, M):
M = M.copy() # to avoid change global M
np.fill_diagonal(M,0)
p = int(M.shape[0])
V = range(p) # set of all nodes
st = self.starting_node
        Vnew = [st]  # vertices that have already found their parent; initialize with the starting node (TAN picks one arbitrarily)
        parent = {st:None}  # dict recording each node's parent
        while set(Vnew) != set(V):  # while there are still nodes whose parents are unknown
            index_i = []  # after the loop, same length as Vnew: the closest node outside Vnew for each node in Vnew
            max_inf = []  # the corresponding mutual information
            for i in range(len(Vnew)):  # could be parallelized
                vnew = Vnew[i]
                ListToSorted = [e for e in M[:,vnew]] # does not need int(e)
                index = sorted(range(len(ListToSorted)),key = lambda k: ListToSorted[k],reverse = True)
                index_i.append([ele for ele in index if ele not in Vnew][0])
                max_inf.append(M[index_i[-1],vnew])
            index1 = sorted(range(len(max_inf)),key = lambda k: max_inf[k],reverse = True)[0]  # position of the strongest new edge within Vnew/index_i/max_inf
            Vnew.append(index_i[index1])  # add that node to the tree
            parent[index_i[index1]] = Vnew[index1]  # add direction: the newly added node must be the child, otherwise some node would have two parents
return parent
    def fit(self,X_L, X_u, pri, M, case_control = True):  # fitted on the training data
X_L = check_array(X_L)
X_u = check_array(X_u)
if X_L.shape[1] != X_u.shape[1]:
raise ValueError('labeled data and unlabeled data have different number of features ')
n_L,p = X_L.shape
# n_u,p = X_u.shape
if case_control:
X_U_or_UL = X_u
else:
X_U_or_UL = np.concatenate((X_L,X_u),axis = 0)
#
n_U_or_UL = X_U_or_UL.shape[0]
parent = self.Findparent(M)
# part 1: proba that can be estimated from labeled examples. 1 P(xij|1,xkl), 2 p(x_root|1) = N_L(x_root)/N_L, P(xij|1,xkl) = N_L(xi=j,xk=l)/N_L(xkl)
# part 2: learn from U, N_U(xij,xkl), and N_U(xkl)
# part 3: p(xij|0,xkl),p(x_root|0) from previous list
#
List_prob_1 = {} # 1 P(xij|1,xkl), 2 p(x_root|1)
List_count_1 = {} # N_L(xij,xpal) and N_L(xij)
#
List_count_U_or_UL = {} # N_U(xij,xkl) and N_U(xij)
#
List_prob_0 = {} # p(xij|0,xkl),p(x_root|0)
K = {}
# for root node
root_i = self.starting_node
x_i_L = X_L[:,root_i]
x_i_L_counter = Counter(x_i_L)
x_i_U_or_UL = X_U_or_UL[:,root_i]
x_i_U_or_UL_counter = Counter(x_i_U_or_UL)
x_i_values = list(set(x_i_L_counter.keys()).union(x_i_U_or_UL_counter.keys()))
K[root_i] = len(list(x_i_values))
# part 1
x_i_L_prob = {key: (x_i_L_counter[key]+self.alpha)/(K[root_i]*self.alpha + n_L ) for key in x_i_values}
List_prob_1[root_i] = x_i_L_prob
List_count_1[root_i] = x_i_L_counter
# part 2
List_count_U_or_UL[root_i] = x_i_U_or_UL_counter
# part 3
        x_i_0_prob = {key: max([0,x_i_U_or_UL_counter[key] - x_i_L_prob[key] * pri * n_U_or_UL]) for key in x_i_values} # N_U(xi =j) - N_u*p(xij, y =1) = N_U(xij,y=0); the numerator can be negative, so clamp it to >= 0
        x_i_0_prob = {key:(self.alpha + value)/ (K[root_i]*self.alpha + n_U_or_UL * (1-pri) ) for key,value in x_i_0_prob.items()} # add pseudo-counts and divide by the denominator
        x_i_0_prob = {key: value/(sum(np.array(list(x_i_0_prob.values())))) for key,value in x_i_0_prob.items() } # renormalize; floating-point error keeps the raw values from summing exactly to 1
List_prob_0[root_i] = x_i_0_prob
#
for i in [e for e in range(0,p) if e != root_i]:
x_i_values = list(set(X_L[:,i]).union(X_U_or_UL[:,i]))
x_i_parent_Value = list(set(X_L[:,parent[i]]).union(X_U_or_UL[:,parent[i] ] ) )
K[i] = len(x_i_values)
# part 1, P(xij|1,xkl) = N_L(xi=j,xk=l)/N_L(xkl)
List_count_1[i] = {v2: {v1:X_L[(X_L[:,i] == v1) & (X_L[:,parent[i]] == v2)].shape[0] for v1 in x_i_values} for v2 in x_i_parent_Value} # {pva1: {'1': , '2':, '3': }, pval2:{}}
List_prob_1[i] = {v2: {v1:(X_L[(X_L[:,i] == v1) & (X_L[:,parent[i]] == v2)].shape[0] + self.alpha)/ (X_L[(X_L[:,parent[i]] == v2)].shape[0] + self.alpha*K[i]) for v1 in x_i_values} for v2 in x_i_parent_Value}
# part 2
List_count_U_or_UL[i] = {v2: {v1:X_U_or_UL[(X_U_or_UL[:,i] == v1) & (X_U_or_UL[:,parent[i]] == v2)].shape[0] for v1 in x_i_values} for v2 in x_i_parent_Value}
# part 3
x_i_0_prob = {v2: {v1: List_count_U_or_UL[i][v2][v1] - List_prob_1[i][v2][v1]*pri* sum(list(List_count_U_or_UL[i][v2].values())) for v1 in x_i_values} for v2 in x_i_parent_Value}
x_i_0_prob = {v2: {v1: max([0,x_i_0_prob[v2][v1] ]) for v1 in x_i_values} for v2 in x_i_parent_Value}
x_i_0_prob = {v2: {v1:(x_i_0_prob[v2][v1] + self.alpha)/(self.alpha*K[i] + (1-pri)*sum(list(List_count_U_or_UL[i][v2].values())) ) for v1 in x_i_values} for v2 in x_i_parent_Value}
x_i_0_prob = {v2: {v1:x_i_0_prob[v2][v1]/sum(list(x_i_0_prob[v2].values())) for v1 in x_i_values} for v2 in x_i_parent_Value} # normalize
List_prob_0[i] = x_i_0_prob
self.case_control_ = case_control
self.is_fitted_ = True
self.parent_ = parent
self.n_features_, self.K_, self.List_count_1_,self.List_prob_1_, self.List_count_U_, self.List_prob_0_, self.prevalence_ = p, K, List_count_1,List_prob_1,List_count_U_or_UL,List_prob_0, pri
return self
def predict_proba(self,X):
check_is_fitted(self)
X = check_array(X)
Prob_1 = []
root_i = self.starting_node
for ins in X:
P1 = self.prevalence_
P0 = 1 - P1
# root_i
P1 = P1 * (self.List_prob_1_[root_i][ins[root_i]])
P0 = P0 * (self.List_prob_0_[root_i][ins[root_i]])
for i in [e for e in range(0,self.n_features_) if e != root_i]:
pValue = ins[self.parent_[i]]
P1 = P1 * (self.List_prob_1_[i][pValue][ins[i]])
P0 = P0 * (self.List_prob_0_[i][pValue][ins[i]])
P = P1 + P0
P1 = P1/P; P0 = P0/P
Prob_1.append(P1)
#
Prob_1 = np.array(Prob_1)
return Prob_1
class PESTAN(Bayes_net_PU):
name = "PESTAN"
def __init__(self,alpha = 1):
self.alpha = alpha
def fit(self,X_L, X_u, pri, M, case_control = True):
X_L = check_array(X_L)
X_u = check_array(X_u)
if X_L.shape[1] != X_u.shape[1]:
raise ValueError('labeled data and unlabeled data have different number of features ')
n_L,p = X_L.shape
n_u,p = X_u.shape
models = []
## train p PSTAN base models
for i in range(p):
model = PSTAN(self.alpha, starting_node= i)
model.fit(X_L, X_u, pri, M, case_control)
models.append(model)
self.case_control_ = case_control
self.models_, self.n_features_ = models, p
self.is_fitted_ = True
return self
def predict_proba(self,X):
check_is_fitted(self)
X = check_array(X)
Prob_1 = 0
for model in self.models_:
Prob_1 += model.predict_proba(X) # np array here
Prob_1 = Prob_1/(self.n_features_)
return(Prob_1)
class PETAN(Bayes_net_PU):
name = "PETAN"
def __init__(self,alpha = 1):
self.alpha = alpha
def fit(self,X_L, X_u, pri, M, case_control = True):
X_L = check_array(X_L)
X_u = check_array(X_u)
if X_L.shape[1] != X_u.shape[1]:
raise ValueError('labeled data and unlabeled data have different number of features ')
n_L,p = X_L.shape
n_u,p = X_u.shape
models = []
## train p PTAN base models
for i in range(p):
model = PTAN(self.alpha, starting_node= i)
            model.fit(X_L, X_u, pri, case_control=case_control)  # pass case_control by keyword; PTAN.fit's fourth positional argument is M
models.append(model)
#append STAN
model = PSTAN(self.alpha, starting_node = 0) #
model.fit(X_L, X_u, pri, M,case_control)
models.append(model)
self.models_, self.n_features_ = models, p
self.is_fitted_ = True
return self
def predict_proba(self,X):
check_is_fitted(self)
X = check_array(X)
Prob_1 = 0
for model in self.models_:
Prob_1 += model.predict_proba(X) # np array here
Prob_1 = Prob_1/(self.n_features_+ 1)
return(Prob_1)
# WNB and WTAN
class WNB(Bayes_net_PU):
name = "WNB"
def __init__(self,alpha = 1):
self.alpha = alpha
def fit(self,X_L, X_u, pri, M = None, case_control = True, model_class = LogisticRegression, **kwargs):
""" Implementation of a fitting function.
        Get a fitted model that predicts p(s=1|x); this step does not depend on the sampling scenario.
Parameters
----------
X_l : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input positive labeled samples.
X_u : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input unlabeled samples.
pri : scalar
The prevalence p(y=1)
        M : None
            Unused; kept for API consistency.
        case_control : Bool
            Case-control scenario or single-training-data scenario; only the
            estimate of c changes, everything else is identical in both.
model_class : a sklearn estimator, preferred logistic regression
since it gives calibrated proba, predict p(s=1|x)
**kwargs :
extra parameters for model_class
Returns self
-------
self
"""
X_L = check_array(X_L)
X_u = check_array(X_u)
if X_L.shape[1] != X_u.shape[1]:
raise ValueError('labeled data and unlabeled data have different number of features ')
n_L,p = X_L.shape
# encode categorical features
X = np.concatenate((X_L,X_u), axis = 0)
enc = preprocessing.OneHotEncoder(drop='first').fit(X)
X = enc.transform(X).toarray()
        # X = pd.DataFrame(X).astype('category')  # converting to categorical does not work in general; the features must be one-hot encoded (and the probabilities differ)
y = np.concatenate( (np.repeat('1',X_L.shape[0] ), np.repeat('0',X_u.shape[0]) ),axis = 0)
# fit model g(x) = p(s=1|x)
model = model_class(**kwargs)
model.fit(X,y)
# estimate p(s=1)
p_s_1 = X_L.shape[0]/(X_L.shape[0]+X_u.shape[0])
# estimate c
if case_control:
c = p_s_1/(pri*(1-p_s_1) + p_s_1)
else:
c = p_s_1/pri
# estimate w(x)
# w_L = np.repeat(1,X_L.shape[0])
inx = list(model.classes_ ).index('1')
        g_U = model.predict_proba( X[n_L:] )[:,inx]  # assume these probabilities are already calibrated (roughly true for logistic regression)
w_U = ((1-c)/c) * (g_U/(1-g_U)) # maybe need to normalize
w_U = w_U - min(w_U) # make non-negative
w_U = w_U / max(w_U) # 0-1
# learning the coef_, p(xij|1), p(xij|0)
        # in the extreme case w_U weights the positives in U as 1 and the negatives as 0; compare the fully supervised p(xij|1) = N_L(xij)/N_L
# List_count_1 = {}
List_prob_1 = {} # {x0:{'1': p(x0 =1|y=1), '2': p(x0 =2|y=1), 'else': }, x1:{}, ... x7:{} }
#
List_prob_0 = {} # P(xi = j|c=0)
for i in range(p):
x_i_L_counter = Counter(X_L[:,i])
x_i_values = list(set(X_L[:,i]).union(set(X_u[:,i])))
# X_u positive weight counter, X_u negative weight counter
X_i_U_1_counter = {val: w_U[X_u[:,i] == val].sum() for val in x_i_values}
X_i_U_0_counter = {val: (1-w_U)[X_u[:,i] == val].sum() for val in x_i_values} # w_U has to be <1
# part 1, p(xi = j|1) = (N_L(xij) + sum_U_xij(w_U))/( n_L + sum(w_U))
List_prob_1[i] = {key: (self.alpha + x_i_L_counter[key] + X_i_U_1_counter[key])/ (self.alpha*len(x_i_values) + n_L + w_U.sum() ) for key in x_i_values}
            # part 2, p(xi = j|0)
List_prob_0[i] = {key: (self.alpha + X_i_U_0_counter[key])/ ((1-w_U).sum() + self.alpha*len(x_i_values) ) for key in x_i_values}
self.is_fitted_ = True
self.case_control_ = case_control
self.List_prob_1_, self.List_prob_0_, self.c_, self.n_features_, self.w_U_, self.prevalence_ = List_prob_1, List_prob_0, c, p, w_U, pri
return self
def predict_proba(self,X):
"""
Return probability estimates for the test vector X. Usually it would be X_unlabeled
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
P(y=1|x) : array-like of shape (n_samples, )
Returns the probability of the samples for positive class in
the model.
"""
check_is_fitted(self)
X = check_array(X)
Prob_1 = []
for ins in X:
P1 = self.prevalence_ # don't need copy, immutable
P0 = 1 - P1
for i in range(self.n_features_):
P1 = P1 * (self.List_prob_1_[i][ins[i]])
P0 = P0 * (self.List_prob_0_[i][ins[i]])
# normalize proba
P = P1 + P0
P1 = P1/P; P0 = P0/P
Prob_1.append(P1)
Prob_1 = np.array(Prob_1) # for shap
return Prob_1
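
# The weighting in WNB.fit follows the standard PU recipe: estimate
# c = p(s=1|y=1) from p(s=1) and the prevalence, then turn g(x) = p(s=1|x)
# into per-example weights on the unlabeled set. A hedged standalone sketch
# of just that step (the function name is the editor's, not the class API;
# g_U is an np.ndarray of p(s=1|x) on the unlabeled rows):
def pu_unlabeled_weights(g_U, n_L, n_U, pri, case_control=True):
    p_s_1 = n_L / (n_L + n_U)                     # estimate p(s = 1)
    if case_control:
        c = p_s_1 / (pri * (1 - p_s_1) + p_s_1)   # label frequency, case-control
    else:
        c = p_s_1 / pri                           # single-training-data scenario
    w_U = ((1 - c) / c) * (g_U / (1 - g_U))       # odds-style weight per example
    w_U = w_U - w_U.min()                         # shift to be non-negative
    return w_U / w_U.max()                        # rescale into [0, 1]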
class WTAN(Bayes_net_PU):
name = "WTAN"
def __init__(self,alpha = 1,starting_node = 0):
self.alpha = alpha
self.starting_node = starting_node
def Findparent(self, M):
M = M.copy() # to avoid change global M
np.fill_diagonal(M,0)
p = int(M.shape[0])
V = range(p) # set of all nodes
st = self.starting_node
        Vnew = [st]  # vertices that have already found their parent; initialize with the starting node (TAN picks one arbitrarily)
        parent = {st:None}  # dict recording each node's parent
        while set(Vnew) != set(V):  # while there are still nodes whose parents are unknown
            index_i = []  # after the loop, same length as Vnew: the closest node outside Vnew for each node in Vnew
            max_inf = []  # the corresponding mutual information
            for i in range(len(Vnew)):  # could be parallelized
                vnew = Vnew[i]
                ListToSorted = [e for e in M[:,vnew]] # does not need int(e)
                index = sorted(range(len(ListToSorted)),key = lambda k: ListToSorted[k],reverse = True)
                index_i.append([ele for ele in index if ele not in Vnew][0])
                max_inf.append(M[index_i[-1],vnew])
            index1 = sorted(range(len(max_inf)),key = lambda k: max_inf[k],reverse = True)[0]  # position of the strongest new edge within Vnew/index_i/max_inf
            Vnew.append(index_i[index1])  # add that node to the tree
            parent[index_i[index1]] = Vnew[index1]  # add direction: the newly added node must be the child, otherwise some node would have two parents
return parent
def fit(self,X_L, X_u, pri, M, case_control = True, model_class = LogisticRegression, **kwargs):
""" Implementation of a fitting function.
        Get a fitted model that predicts p(s=1|x); this step does not depend on the sampling scenario.
Parameters
----------
X_l : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input positive labeled samples.
X_u : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input unlabeled samples.
pri : scalar
The prevalence p(y=1)
        M : np.matrix, shape (n_features, n_features)
            Conditional mutual information matrix used to build the tree.
case_control : Bool
Case control scenario or single-training data scenario
model_class : a sklearn estimator, preferred logistic regression
since it gives calibrated proba, predict p(s=1|x)
**kwargs :
extra parameters for model_class
Returns self
-------
self
"""
X_L = check_array(X_L)
X_u = check_array(X_u)
if X_L.shape[1] != X_u.shape[1]:
raise ValueError('labeled data and unlabeled data have different number of features ')
n_L,p = X_L.shape
# parent
parent = self.Findparent(M)
# fit model g(x) = p(s=1|x)
X = np.concatenate((X_L,X_u), axis = 0)
enc = preprocessing.OneHotEncoder(drop='first').fit(X)
X = enc.transform(X).toarray()
# X = pd.DataFrame(X).astype('category') # convert to categorical, for logistic regression to work
y = np.concatenate( (np.repeat('1',X_L.shape[0] ), np.repeat('0',X_u.shape[0]) ),axis = 0)
#
model = model_class(**kwargs)
model.fit(X,y)
# estimate p(s=1)
p_s_1 = X_L.shape[0]/(X_L.shape[0]+X_u.shape[0])
# estimate c
if case_control:
c = p_s_1/(pri*(1-p_s_1) + p_s_1)
else:
c = p_s_1/pri
# estimate w(x)
inx = list(model.classes_ ).index('1')
        g_U = model.predict_proba( X[n_L:] )[:,inx]  # assume these probabilities are already calibrated (roughly true for logistic regression)
w_U = ((1-c)/c) * (g_U/(1-g_U)) # maybe need to normalize
w_U = w_U - min(w_U) # make non-negative
w_U = w_U / max(w_U) # 0-1
# learning the coef_, p(xij|1,xpal), p(xij|0,xpal)
        # in the extreme case w_U weights the positives in U as 1 and the negatives as 0; compare the fully supervised p(xij|1) = N_L(xij)/N_L
# List_count_1 = {}
List_prob_1 = {} #
#
List_prob_0 = {} # P(xi = j|c=0)
# for root node
root_i = self.starting_node
x_i_L_counter = Counter(X_L[:,root_i])
x_i_values = list(set(X_L[:,root_i]).union(set(X_u[:,root_i])))
X_i_U_1_counter = {val: w_U[X_u[:,root_i] == val].sum() for val in x_i_values}
X_i_U_0_counter = {val: (1-w_U)[X_u[:,root_i] == val].sum() for val in x_i_values}
# part 1, p(xi = j|1) = (N_L(xij) + sum_U_xij(w_U))/( n_L + sum(w_U))
List_prob_1[root_i] = {key: (self.alpha + x_i_L_counter[key] + X_i_U_1_counter[key]) / (n_L + w_U.sum() + self.alpha*len(x_i_values) ) for key in x_i_values}
        # part 2, p(xi = j|0)
List_prob_0[root_i] = {key: ( self.alpha + X_i_U_0_counter[key])/ ((1-w_U).sum() + self.alpha*len(x_i_values) ) for key in x_i_values}
# for other nodes
for i in [e for e in range(0,p) if e != root_i]:
x_i_values = list(set(X_L[:,i]).union(X_u[:,i]))
x_i_parent_Value = list(set(X_L[:,parent[i]]).union(X_u[:,parent[i] ] ) )
# part 1, p(xij|1,xkl)
List_prob_1[i] = {v2: {v1: (self.alpha + X_L[(X_L[:,i] == v1) & (X_L[:,parent[i]] == v2)].shape[0] + w_U[(X_u[:,i] == v1) & (X_u[:,parent[i]] == v2)].sum() ) /
( X_L[(X_L[:,parent[i]] == v2)].shape[0] + w_U[(X_u[:,parent[i]] == v2)].sum()+ self.alpha*len(x_i_values) )
for v1 in x_i_values} for v2 in x_i_parent_Value}
# part 2 , p(xij|0,xkl)
List_prob_0[i] = {v2: {v1: (self.alpha + (1-w_U)[(X_u[:,i] == v1) & (X_u[:,parent[i]] == v2)].sum() ) /
( (1-w_U)[(X_u[:,parent[i]] == v2)].sum() + self.alpha*len(x_i_values) )
for v1 in x_i_values} for v2 in x_i_parent_Value}
self.case_control_ = case_control
self.is_fitted_ = True
self.parent_ = parent
self.case_control_ = case_control
self.List_prob_1_, self.List_prob_0_, self.c_, self.n_features_, self.w_U_, self.prevalence_ = List_prob_1, List_prob_0, c, p, w_U, pri
return self
def predict_proba(self,X):
"""
Return probability estimates for the test vector X. Usually it would be X_unlabeled
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
P(y=1|x) : array-like of shape (n_samples, )
Returns the probability of the samples for positive class in
the model.
"""
check_is_fitted(self)
X = check_array(X)
Prob_1 = []
root_i = self.starting_node
for ins in X:
P1 = self.prevalence_ # don't need copy, immutable
P0 = 1 - P1
# root_i
P1 = P1 * (self.List_prob_1_[root_i][ins[root_i]])
P0 = P0 * (self.List_prob_0_[root_i][ins[root_i]])
for i in [e for e in range(0,self.n_features_) if e != root_i]:
pValue = ins[self.parent_[i]]
P1 = P1 * (self.List_prob_1_[i][pValue][ins[i]])
P0 = P0 * (self.List_prob_0_[i][pValue][ins[i]])
# normalize proba
P = P1 + P0
P1 = P1/P; P0 = P0/P
Prob_1.append(P1)
#
Prob_1 = np.array(Prob_1) # for shap
return Prob_1
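
# A hedged end-to-end sketch for WTAN: M is assumed to be a conditional
# mutual information matrix (e.g. the one PTAN computes), and the extra
# keyword arguments are forwarded to the default LogisticRegression.
# Invented call, illustration only.
def _demo_wtan(X_L, X_u, M, pri=0.3):
    clf = WTAN(alpha=1, starting_node=0)
    clf.fit(X_L, X_u, pri, M, case_control=True, C=1.0, max_iter=200)
    return clf.predict_proba(X_u)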

avg_line_length: 47.192519 | max_line_length: 311 | alphanum_fraction: 0.616998

hexsha: 7dc21aa0c161b743116f5ff67230b1c4e2a991c8 | size: 20662 | ext: py | lang: Python
path: parser/team01/calcularDelete.py | repo: webdev188/tytus | head: 847071edb17b218f51bb969d335a8ec093d13f94 | licenses: ["MIT"]
stars: 35 (2020-12-07T03:11:43.000Z to 2021-04-15T17:38:16.000Z) | issues: 47 (2020-12-09T01:29:09.000Z to 2021-01-13T05:37:50.000Z) | forks: 556 (2020-12-07T03:13:31.000Z to 2021-06-17T17:41:10.000Z)

import sys
import ts as TS
import instrucciones as Instruccion
import tempfile
from datetime import datetime
from pprint import pprint
lstResultado = []
contador = 1
x = 0
def inc():
global x
x += 1
return x
def calcularDelete(arbol,ts):
global lstResultado
global contador
contador += 1
print("--#Iniciando calcularSelect[" + str(contador)+"]"+"[\'\'\'"+str(arbol.etiqueta)+"]")
if(arbol is not None and arbol.esHoja is not None and arbol.esHoja =='N'):
if(arbol.etiqueta == 'select_colum_list'):
ts.operacion_actual = TS.TIPO_SELECT_CAMPOS.COLUMNAS
if(arbol is not None and arbol.esHoja is not None and arbol.esHoja =='S'):
        #we are in the tables subtree
if(arbol.etiqueta == 'table_name_d'):
ts.operacion_actual = TS.TIPO_SELECT_CAMPOS.TABLAS
#region
if len(arbol.hijos) == 1:
id = inc()
#arbol.hijos[0].etiquetaPadre = arbol.etiqueta
if(arbol.etiqueta =='in_value_list'):
if(arbol.hijos[0].esHoja == 'S'):
ts.valor_temporal = arbol.hijos[0].lexema
else:
calcularDelete(arbol.hijos[0],ts)
temporal1 = ts.valor_temporal
else:
valorRetorno = str(calcularDelete(arbol.hijos[0],ts))
if(arbol.etiqueta == 'WHERE_CONDITION'):
if(hasattr(ts, 'TIPO_SELECT_CONDICION') and ts.TIPO_SELECT_CONDICION == TS.TIPO_SELECT_CONDICION.COMPARACION) :
if(len(ts.lstcondiciones) == 0) :
ts.agregarCondicionDelete(ts.valor_temporal)
return id
elif len(arbol.hijos) == 2:
id = inc()
if(arbol.etiqueta == 'delete_statement'):
if(arbol.hijos[0].etiqueta == 'table_name_d'):
if(arbol.hijos[0].esHoja == 'S'):
temporal = str(arbol.hijos[0].lexema)
ts.agregarTablaDelete(temporal)
else:
calcularDelete(arbol.hijos[0],ts)
temporal1 = ts.valor_temporal
elif(arbol.hijos[0].etiqueta == 'insert_columns_and_source'):
calcularDelete(arbol.hijos[0],ts)
else:
calcularDelete(arbol.hijos[0],ts)
if(arbol.hijos[1].etiqueta == 'table_name_d'):
if(arbol.hijos[1].esHoja == 'S'):
temporal = str(arbol.hijos[1].lexema)
ts.agregarTablaDelete(temporal)
else:
calcularDelete(arbol.hijos[1],ts)
temporal1 = ts.valor_temporal
elif(arbol.hijos[1].etiqueta == 'insert_columns_and_source'):
calcularDelete(arbol.hijos[1],ts)
else:
calcularDelete(arbol.hijos[1],ts)
elif(arbol.etiqueta == 'in_value_list'):
# str(calcularDelete(arbol.hijos[0],ts))
# temporal1 = ts.valor_temporal.valor
if(arbol.hijos[0].esHoja == 'S'):
temporal1 = str(arbol.hijos[0].lexema)
else:
calcularDelete(arbol.hijos[0],ts)
temporal1 = ts.valor_temporal
# str(calcularDelete(arbol.hijos[1],ts))
# temporal2 = ts.valor_temporal.valor
if(arbol.hijos[1].esHoja == 'S'):
temporal2 = str(arbol.hijos[1].lexema)
else:
calcularDelete(arbol.hijos[1],ts)
temporal2 = ts.valor_temporal
ts.valor_temporal = temporal1+','+temporal2
else:
valorRetorno1 = str(calcularDelete(arbol.hijos[0],ts))
valorRetorno2 = str(calcularDelete(arbol.hijos[1],ts))
elif len(arbol.hijos) == 3:
id = inc()
if(arbol.etiqueta == 'comparison_predicate'):
ts.TIPO_SELECT_CONDICION = TS.TIPO_SELECT_CONDICION.COMPARACION
            #check whether the leaf can be read directly or must be synthesized
# str(calcularDelete(arbol.hijos[0],ts))
# temporal1 = ts.valor_temporal.valor
if(arbol.hijos[0].etiqueta == 'value_expression'):
temporal1 = arbol.hijos[0].lexema
else:
calcularDelete(arbol.hijos[0],ts)
temporal1 = ts.valor_temporal.valor
if(arbol.hijos[1].etiqueta == 'comp_op'):
temporal2 = arbol.hijos[1].lexema
else:
calcularDelete(arbol.hijos[1],ts)
temporal2 = ts.valor_temporal.valor
if(arbol.hijos[2].etiqueta == 'value_expression'):
temporal3 = arbol.hijos[2].lexema
elif(arbol.hijos[2].etiqueta == 'fun_now'):
temporal3 = 'now'
else:
calcularDelete(arbol.hijos[2],ts)
temporal3 = ts.valor_temporal.valor
            #stored in a temporary because more items will probably follow as a list
#valTemp = TS.ExpresionComparacion(temporal1,temporal2,temporal3,None,TS.TIPO_SELECT_CONDICION.COMPARACION)
#ts.valor_temporal.valor = TS.ValorTemporal(valTemp, None)
#expIn = TS.ExpresionComparacion(temporal1,temporal2,temporal3,None,TS.TIPO_SELECT_CONDICION.IN)
# ts.agregarCondicionDelete(expIn)
#ts.agregarCondicionDelete(expComparacion)
valTemp = TS.ExpresionComparacion(temporal1,temporal2,temporal3,None,TS.TIPO_SELECT_CONDICION.COMPARACION)
ts.valor_temporal = TS.ValorTemporal(valTemp, None)
elif(arbol.etiqueta == 'search_condition'):
ts.TIPO_SELECT_CONDICION = TS.TIPO_SELECT_CONDICION.COMPARACION
# valorRetorno1 = str(calcularDelete(arbol.hijos[0],ts))
# temporal1 = ts.valor_temporal.valor
if(arbol.hijos[0].esHoja == 'S'):
temporal1 = arbol.hijos[0].lexema
else:
calcularDelete(arbol.hijos[0],ts)
temporal1 = ts.valor_temporal
#valorRetorno2 = str(calcularDelete(arbol.hijos[1],ts))
#temporal2 = arbol.hijos[1].lexema
if(arbol.hijos[1].esHoja == 'S'):
temporal2 = arbol.hijos[1].lexema
else:
calcularDelete(arbol.hijos[1],ts)
temporal2 = ts.valor_temporal
# valorRetorno3 = str(calcularDelete(arbol.hijos[2],ts))
# temporal3 = ts.valor_temporal.valor
if(arbol.hijos[2].esHoja == 'S'):
temporal3 = arbol.hijos[2].lexema
else:
calcularDelete(arbol.hijos[2],ts)
temporal3 = ts.valor_temporal
            #since it is a single item, it is sent straight to the comparison list
#expComparacion = TS.ExpresionListaComparadores(temporal1,temporal2,temporal3)
valTemp = TS.ExpresionComparacion(temporal1,temporal2,temporal3,None,TS.TIPO_SELECT_CONDICION.COMPARACION)
ts.valor_temporal.valor = TS.ValorTemporal(valTemp, None)
#ts.agregarCondicionDelete(expComparacion)
elif(arbol.etiqueta == 'boolean_term'):
ts.TIPO_SELECT_CONDICION = TS.TIPO_SELECT_CONDICION.COMPARACION
# valorRetorno1 = str(calcularDelete(arbol.hijos[0],ts))
# temporal1 = ts.valor_temporal.valor
if(arbol.hijos[0].esHoja == 'S'):
temporal1 = arbol.hijos[0].lexema
else:
calcularDelete(arbol.hijos[0],ts)
temporal1 = ts.valor_temporal
# valorRetorno2 = str(calcularDelete(arbol.hijos[1],ts))
# temporal2 = ts.valor_temporal.valor
if(arbol.hijos[1].etiqueta == 'opAnd' or arbol.hijos[1].etiqueta == 'opOr'):
temporal2 = arbol.hijos[1].lexema
else:
calcularDelete(arbol.hijos[1],ts)
temporal2 = ts.valor_temporal.valor
# valorRetorno3 = str(calcularDelete(arbol.hijos[2],ts))
# temporal3 = ts.valor_temporal.valor
if(arbol.hijos[2].esHoja == 'S'):
temporal3 = arbol.hijos[2].lexema
else:
calcularDelete(arbol.hijos[2],ts)
temporal3 = ts.valor_temporal
expComparacion = TS.ExpresionComparacion(temporal1,temporal2,temporal3,None,TS.TIPO_SELECT_CONDICION.COMPARACION)
ts.valor_temporal = expComparacion
elif(arbol.etiqueta == 'in_predicate'):
ts.TIPO_SELECT_CONDICION = TS.TIPO_SELECT_CONDICION.IN
# str(calcularDelete(arbol.hijos[0],ts))
# temporal1 = ts.valor_temporal.valor
if(arbol.hijos[0].esHoja == 'S'):
temporal1 = arbol.hijos[0].lexema
else:
calcularDelete(arbol.hijos[0],ts)
temporal1 = ts.valor_temporal
if(arbol.hijos[1].etiqueta == 'predicatein'):
temporal2 = arbol.hijos[1].lexema
else:
calcularDelete(arbol.hijos[1],ts)
temporal2 = ts.valor_temporal.valor
# str(calcularDelete(arbol.hijos[2],ts))
# temporal3 = ts.valor_temporal.valor
if(arbol.hijos[2].esHoja == 'S'):
temporal3 = arbol.hijos[2].lexema
else:
calcularDelete(arbol.hijos[2],ts)
temporal3 = ts.valor_temporal
expIn = TS.ExpresionComparacion(temporal1,temporal2,temporal3,None,TS.TIPO_SELECT_CONDICION.IN)
ts.agregarCondicionDelete(expIn)
# elif(arbol.etiqueta == 'null_predicate'):
# ts.TIPO_SELECT_CONDICION = TS.TIPO_SELECT_CONDICION.NULL
# str(calcularDelete(arbol.hijos[0],ts))
# temporal1 = ts.valor_temporal.valor
# str(calcularDelete(arbol.hijos[1],ts))
# temporal2 = ts.valor_temporal.valor
# str(calcularDelete(arbol.hijos[2],ts))
# temporal3 = ts.valor_temporal.valor
# expIn = TS.ExpresionComparacion(temporal1,temporal2,temporal3,None,TS.TIPO_SELECT_CONDICION.NULL)
# ts.agregarCondicionDelete(expIn)
elif(arbol.etiqueta == 'like_percent_predicate'):
ts.TIPO_SELECT_CONDICION = TS.TIPO_SELECT_CONDICION.LIKE
#str(calcularDelete(arbol.hijos[0],ts))
temporal1 = arbol.hijos[0]
#str(calcularDelete(arbol.hijos[1],ts))
temporal2 = arbol.hijos[1]
#str(calcularDelete(arbol.hijos[2],ts))
temporal3 = arbol.hijos[2]
expIn = TS.ExpresionComparacion(temporal1,temporal2,temporal3,None,TS.TIPO_SELECT_CONDICION.LIKE)
ts.agregarCondicionDelete(expIn)
elif(arbol.etiqueta == 'column_reference'):
ts.TIPO_SELECT_CONDICION = TS.TIPO_SELECT_CONDICION.SUBSTRING
temporal1 = arbol.hijos[0].lexema
temporal2 = arbol.hijos[1].lexema
temporal3 = arbol.hijos[2].lexema
# str(calcularDelete(arbol.hijos[3],ts))
# temporal4 = ts.valor_temporal.valor
            #stored in a temporary because more items will probably follow as a list
ts.valor_temporal = TS.ExpresionComparacion(temporal1,temporal2,temporal3,None,TS.TIPO_SELECT_CONDICION.SUBSTRING)
else:
valorRetorno1 = str(calcularDelete(arbol.hijos[0],ts))
valorRetorno2 = str(calcularDelete(arbol.hijos[1],ts))
valorRetorno3 = str(calcularDelete(arbol.hijos[2],ts))
return id
    #************************************
    # TREE WITH 4 CHILDREN
    #************************************
elif len(arbol.hijos) == 4:
id = inc()
if(arbol.etiqueta == 'null_predicate'):
ts.TIPO_SELECT_CONDICION = TS.TIPO_SELECT_CONDICION.NOT_NULL
# str(calcularDelete(arbol.hijos[0],ts))
# temporal1 = ts.valor_temporal.valor
if(arbol.hijos[0].esHoja == 'S'):
temporal1 = arbol.hijos[0].lexema
else:
calcularDelete(arbol.hijos[0],ts)
temporal1 = ts.valor_temporal
# str(calcularDelete(arbol.hijos[1],ts))
# temporal2 = ts.valor_temporal.valor
if(arbol.hijos[1].esHoja == 'S'):
temporal2 = arbol.hijos[1].lexema
else:
calcularDelete(arbol.hijos[1],ts)
temporal2 = ts.valor_temporal
# str(calcularDelete(arbol.hijos[2],ts))
# temporal3 = ts.valor_temporal.valor
if(arbol.hijos[2].esHoja == 'S'):
temporal3 = arbol.hijos[2].lexema
else:
calcularDelete(arbol.hijos[2],ts)
temporal3 = ts.valor_temporal
# str(calcularDelete(arbol.hijos[3],ts))
# temporal4 = ts.valor_temporal.valor
if(arbol.hijos[3].esHoja == 'S'):
temporal4 = arbol.hijos[3].lexema
else:
calcularDelete(arbol.hijos[3],ts)
temporal4 = ts.valor_temporal
expIn = TS.ExpresionComparacion(temporal1,temporal2,temporal3,temporal4,TS.TIPO_SELECT_CONDICION.NOT_NULL)
ts.agregarCondicionDelete(expIn)
elif(arbol.etiqueta == 'substring_predicate'):
ts.TIPO_SELECT_CONDICION = TS.TIPO_SELECT_CONDICION.SUBSTRING
if(arbol.hijos[0].esHoja == 'S'):
temporal1 = arbol.hijos[0].lexema
else:
calcularDelete(arbol.hijos[0],ts)
temporal1 = ts.valor_temporal
# str(calcularDelete(arbol.hijos[1],ts))
# temporal2 = ts.valor_temporal
if(arbol.hijos[1].esHoja == 'S'):
temporal2 = arbol.hijos[1].lexema
else:
calcularDelete(arbol.hijos[1],ts)
temporal2 = ts.valor_temporal
# str(calcularDelete(arbol.hijos[2],ts))
# temporal3 = ts.valor_temporal
if(arbol.hijos[2].esHoja == 'S'):
temporal3 = arbol.hijos[2].lexema
else:
calcularDelete(arbol.hijos[2],ts)
temporal3 = ts.valor_temporal
# str(calcularDelete(arbol.hijos[3],ts))
# temporal4 = ts.valor_temporal
if(arbol.hijos[3].esHoja == 'S'):
temporal4 = arbol.hijos[3].lexema
else:
calcularDelete(arbol.hijos[3],ts)
temporal4 = ts.valor_temporal
expIn = TS.ExpresionComparacion(temporal1,temporal2,temporal3,temporal4,TS.TIPO_SELECT_CONDICION.SUBSTRING)
ts.agregarCondicionDelete(expIn)
else:
valorRetorno1 = str(calcularDelete(arbol.hijos[0],ts))
valorRetorno2 = str(calcularDelete(arbol.hijos[1],ts))
valorRetorno3 = str(calcularDelete(arbol.hijos[2],ts))
valorRetorno4 = str(calcularDelete(arbol.hijos[3],ts))
return id
    #************************************
    # TREE WITH 5 CHILDREN
    #************************************
elif len(arbol.hijos) == 5:
id = inc()
#region
if(arbol.etiqueta == 'between_predicate'):
ts.TIPO_SELECT_CONDICION = TS.TIPO_SELECT_CONDICION.BETWEEN
temporal1 = arbol.hijos[0].lexema
temporal2 = arbol.hijos[1].lexema
temporal3 = arbol.hijos[2].lexema
temporal4 = arbol.hijos[3].lexema
temporal5 = arbol.hijos[4].lexema
expComparacion = TS.ExpresionComparacion(temporal3,temporal1,temporal5,None,TS.TIPO_SELECT_CONDICION.BETWEEN)
ts.agregarCondicionDelete(expComparacion)
elif(arbol.etiqueta == 'distinct_predicate'):
ts.TIPO_SELECT_CONDICION = TS.TIPO_SELECT_CONDICION.DISTINCT
temporal1 = arbol.hijos[0].lexema
temporal2 = arbol.hijos[1].lexema
temporal3 = arbol.hijos[2].lexema
temporal4 = arbol.hijos[3].lexema
temporal5 = arbol.hijos[4].lexema
expComparacion = TS.ExpresionComparacion(temporal3,temporal1,temporal5,None,TS.TIPO_SELECT_CONDICION.DISTINCT)
ts.agregarCondicionDelete(expComparacion)
#endregion
else:
valorRetorno1 = str(calcularDelete(arbol.hijos[0],ts))
valorRetorno2 = str(calcularDelete(arbol.hijos[1],ts))
valorRetorno3 = str(calcularDelete(arbol.hijos[2],ts))
valorRetorno4 = str(calcularDelete(arbol.hijos[3],ts))
valorRetorno5 = str(calcularDelete(arbol.hijos[4],ts))
return id
    #************************************
    # TREE WITH 6 CHILDREN
    #************************************
elif len(arbol.hijos) == 6:
id = inc()
#region
if(arbol.etiqueta == 'between_predicate'):
ts.TIPO_SELECT_CONDICION = TS.TIPO_SELECT_CONDICION.NOT_BETWEEN
            if(arbol.hijos[0].esHoja == 'S'):
                temporal1 = arbol.hijos[0].lexema
            else:
                calcularDelete(arbol.hijos[0],ts)
                temporal1 = ts.valor_temporal
            if(arbol.hijos[1].esHoja == 'S'):
                temporal2 = arbol.hijos[1].lexema
            else:
                calcularDelete(arbol.hijos[1],ts)
                temporal2 = ts.valor_temporal
            if(arbol.hijos[2].esHoja == 'S'):
                temporal3 = arbol.hijos[2].lexema
            else:
                calcularDelete(arbol.hijos[2],ts)
                temporal3 = ts.valor_temporal
            if(arbol.hijos[3].esHoja == 'S'):
                temporal4 = arbol.hijos[3].lexema
            else:
                calcularDelete(arbol.hijos[3],ts)
                temporal4 = ts.valor_temporal
            if(arbol.hijos[4].esHoja == 'S'):
                temporal5 = arbol.hijos[4].lexema
            else:
                calcularDelete(arbol.hijos[4],ts)
                temporal5 = ts.valor_temporal
            if(arbol.hijos[5].esHoja == 'S'):
                temporal6 = arbol.hijos[5].lexema
            else:
                calcularDelete(arbol.hijos[5],ts)
                temporal6 = ts.valor_temporal
expComparacion = TS.ExpresionComparacion(temporal4,temporal1,temporal6,None,TS.TIPO_SELECT_CONDICION.NOT_BETWEEN)
ts.agregarCondicionDelete(expComparacion)
#endregion
else:
valorRetorno1 = str(calcularDelete(arbol.hijos[0],ts))
valorRetorno2 = str(calcularDelete(arbol.hijos[1],ts))
valorRetorno3 = str(calcularDelete(arbol.hijos[2],ts))
valorRetorno4 = str(calcularDelete(arbol.hijos[3],ts))
valorRetorno5 = str(calcularDelete(arbol.hijos[4],ts))
            valorRetorno6 = str(calcularDelete(arbol.hijos[5],ts))
        return id
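# --- Editor's sketch (not part of the original source) ---------------------
# The leaf-or-evaluate pattern above repeats once per child in every
# predicate branch. A helper like the hypothetical valor_de_hijo below could
# collapse it, assuming the conventions used throughout this file: leaf nodes
# (esHoja == 'S') carry their value in .lexema, and non-leaf nodes are
# evaluated with calcularDelete, which stores its result in ts.valor_temporal.
def valor_de_hijo(hijo, ts):
    """Return a child's value, evaluating the subtree when it is not a leaf."""
    if hijo.esHoja == 'S':
        return hijo.lexema
    calcularDelete(hijo, ts)
    return ts.valor_temporal

# Each predicate branch then reduces to, for example:
#     temporal1, temporal2, temporal3, temporal4 = (
#         valor_de_hijo(h, ts) for h in arbol.hijos[:4])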
[metadata row: per-file quality-signal columns omitted]
next record: hexsha: 8179becce301ae759d6010ce93ca8a9af4f171cf | size: 49456 | ext: py | lang: Python
path: pynetdicom3/tests/test_pdu.py | repo: rdebroiz/pynetdicom3 | head: 0baea8310b9d3fd0a67df0c2e90f2607463f73c7
licenses: ["MIT"] | star/issue/fork counts and event dates: null
#!/usr/bin/env python
from io import BytesIO
import logging
import unittest
from pydicom.uid import UID
from pynetdicom3 import (
VerificationSOPClass, StorageSOPClassList, QueryRetrieveSOPClassList
)
from pynetdicom3.pdu import (
A_ASSOCIATE_RQ, A_ASSOCIATE_AC, A_ASSOCIATE_RJ, P_DATA_TF, A_RELEASE_RQ,
A_RELEASE_RP, A_ABORT_RQ, MaximumLengthSubItem,
ImplementationClassUIDSubItem, ImplementationVersionNameSubItem,
AsynchronousOperationsWindowSubItem, SCP_SCU_RoleSelectionSubItem,
SOPClassExtendedNegotiationSubItem,
SOPClassCommonExtendedNegotiationSubItem, UserIdentitySubItemRQ,
UserIdentitySubItemAC, PDU, ApplicationContextItem,
PresentationContextItemAC, PresentationContextItemRQ, UserInformationItem
)
from pynetdicom3.pdu_primitives import (
MaximumLengthNegotiation, ImplementationClassUIDNotification,
ImplementationVersionNameNotification, A_P_ABORT, A_ABORT, A_ASSOCIATE,
P_DATA
)
from .encoded_pdu_items import (
    a_associate_rq, a_associate_ac, a_associate_rj, a_release_rq,
a_release_rp, a_abort, a_p_abort, p_data_tf
)
LOGGER = logging.getLogger('pynetdicom3')
LOGGER.setLevel(logging.CRITICAL)
class TestPDU(unittest.TestCase):
def test_length_property(self):
""" Check that the length property returns the correct value """
pdu = A_ASSOCIATE_AC()
pdu.Decode(a_associate_ac)
self.assertEqual(pdu.length, pdu.get_length())
def test_decode(self):
"""Check that encode raises not implemented"""
pdu = A_ASSOCIATE_AC()
with self.assertRaises(NotImplementedError):
pdu.decode(a_associate_ac)
class TestPDU_NextItem(unittest.TestCase):
def test_unknown_item_type(self):
""" Check that an unknown item value raises ValueError """
s = BytesIO(b'\x00\x02\x03\x04\x04')
pdu = PDU()
self.assertRaises(ValueError, pdu._next_item, s)
def test_empty_stream(self):
""" Check that an empty stream returns None """
s = BytesIO(b'')
pdu = PDU()
item = pdu._next_item(s)
self.assertTrue(item is None)
def test_correct_item(self):
""" Check that stream returns correct item type """
pdu = PDU()
item = pdu._next_item(BytesIO(b'\x01'))
self.assertTrue(isinstance(item, A_ASSOCIATE_RQ))
item = pdu._next_item(BytesIO(b'\x02'))
self.assertTrue(isinstance(item, A_ASSOCIATE_AC))
item = pdu._next_item(BytesIO(b'\x10'))
self.assertTrue(isinstance(item, ApplicationContextItem))
class TestPDU_NextItemType(unittest.TestCase):
def test_empty_stream(self):
""" Check that an empty stream returns None """
s = BytesIO(b'')
pdu = PDU()
item_type = pdu._next_item_type(s)
self.assertTrue(item_type is None)
def test_normal_stream(self):
""" Check that a stream returns the value of the first byte """
s = BytesIO(b'\x01\x02\x03\x04\x04')
pdu = PDU()
item_type = pdu._next_item_type(s)
self.assertTrue(item_type == 1)
def test_return_type(self):
""" Check stream returns the value of the first byte as an int """
s = BytesIO(b'\x01\x02\x03\x04\x04')
pdu = PDU()
item_type = pdu._next_item_type(s)
self.assertTrue(isinstance(item_type, int))
class TestPDU_Equality(unittest.TestCase):
"""Test the PDU equality/inequality operators."""
def test_equality(self):
"""Test the equality operator"""
self.assertTrue(PDU() == PDU())
self.assertFalse(PDU() == 'TEST')
pdu = PDU()
pdu.formats = ['a']
self.assertFalse(pdu == PDU())
def test_inequality(self):
"""Test the inequality operator"""
self.assertFalse(PDU() != PDU())
self.assertTrue(PDU() != 'TEST')
pdu = PDU()
pdu.formats = ['a']
self.assertTrue(pdu != PDU())
class TestPDU_A_ASSOC_RQ(unittest.TestCase):
"""Test the A_ASSOCIATE_RQ class."""
def test_property_setters(self):
"""Check the property setters are working correctly."""
# pdu.application_context_name
pdu = A_ASSOCIATE_RQ()
item = ApplicationContextItem()
pdu.variable_items = [item]
self.assertEqual(pdu.application_context_name, '')
pdu.application_context_name = 'TEST'
self.assertEqual(pdu.application_context_name, 'TEST')
# pdu.presentation_context
pdu = A_ASSOCIATE_RQ()
pdu.Decode(a_associate_rq)
role_selection = SCP_SCU_RoleSelectionSubItem()
role_selection.sop_class_uid = '1.2.840.10008.1.1'
role_selection.scu_role = 1
role_selection.scp_role = 1
pdu.user_information.user_data.append(role_selection)
context = pdu.presentation_context[0]
self.assertTrue(context.SCP == 1)
self.assertTrue(context.SCU == 1)
def test_string_output(self):
"""Check the string output works"""
pdu = A_ASSOCIATE_RQ()
pdu.Decode(a_associate_rq)
self.assertTrue("Verification SOP Class" in pdu.__str__())
self.assertTrue("Implicit VR Little Endian" in pdu.__str__())
self.assertTrue("3680043.9.3811.0.9.0" in pdu.__str__())
def test_stream_decode_values_types(self):
""" Check decoding the assoc_rq stream produces the correct objects """
pdu = A_ASSOCIATE_RQ()
pdu.Decode(a_associate_rq)
self.assertEqual(pdu.pdu_type, 0x01)
self.assertEqual(pdu.pdu_length, 209)
self.assertEqual(pdu.protocol_version, 0x0001)
self.assertTrue(isinstance(pdu.pdu_type, int))
self.assertTrue(isinstance(pdu.pdu_length, int))
self.assertTrue(isinstance(pdu.protocol_version, int))
# Check VariableItems
# The actual items will be tested separately
self.assertTrue(isinstance(pdu.variable_items[0], ApplicationContextItem))
self.assertTrue(isinstance(pdu.variable_items[1], PresentationContextItemRQ))
self.assertTrue(isinstance(pdu.variable_items[2], UserInformationItem))
def test_decode_properties(self):
""" Check decoding the assoc_rq stream produces the correct properties """
pdu = A_ASSOCIATE_RQ()
pdu.Decode(a_associate_rq)
# Check AE titles
self.assertEqual(pdu.calling_ae_title.decode('utf-8'), 'ECHOSCU ')
self.assertEqual(pdu.called_ae_title.decode('utf-8'), 'ANY-SCP ')
self.assertTrue(isinstance(pdu.calling_ae_title, bytes))
self.assertTrue(isinstance(pdu.called_ae_title, bytes))
# Check application_context_name property
app_name = pdu.application_context_name
self.assertTrue(isinstance(app_name, UID))
self.assertEqual(app_name, '1.2.840.10008.3.1.1.1')
# Check presentation_context property
contexts = pdu.presentation_context
self.assertTrue(isinstance(contexts, list))
for context in contexts:
self.assertTrue(isinstance(context, PresentationContextItemRQ))
# Check user_information property
user_info = pdu.user_information
self.assertTrue(isinstance(user_info, UserInformationItem))
def test_new_encode(self):
""" Check encoding using new generic method """
pdu = A_ASSOCIATE_RQ()
pdu.Decode(a_associate_rq)
s = pdu.encode()
self.assertEqual(s, a_associate_rq)
def test_stream_encode(self):
""" Check encoding an assoc_rq produces the correct output """
pdu = A_ASSOCIATE_RQ()
pdu.Decode(a_associate_rq)
s = pdu.Encode()
self.assertEqual(s, a_associate_rq)
def test_to_primitive(self):
""" Check converting PDU to primitive """
pdu = A_ASSOCIATE_RQ()
pdu.Decode(a_associate_rq)
primitive = pdu.ToParams()
self.assertEqual(primitive.application_context_name, UID('1.2.840.10008.3.1.1.1'))
self.assertEqual(primitive.calling_ae_title, b'ECHOSCU ')
self.assertEqual(primitive.called_ae_title, b'ANY-SCP ')
# Test User Information
for item in primitive.user_information:
# Maximum PDU Length (required)
if isinstance(item, MaximumLengthNegotiation):
self.assertEqual(item.maximum_length_received, 16382)
self.assertTrue(isinstance(item.maximum_length_received, int))
# Implementation Class UID (required)
elif isinstance(item, ImplementationClassUIDNotification):
self.assertEqual(item.implementation_class_uid, UID('1.2.826.0.1.3680043.9.3811.0.9.0'))
self.assertTrue(isinstance(item.implementation_class_uid, UID))
# Implementation Version Name (optional)
elif isinstance(item, ImplementationVersionNameNotification):
self.assertEqual(item.implementation_version_name, b'PYNETDICOM_090')
self.assertTrue(isinstance(item.implementation_version_name, bytes))
# Test Presentation Contexts
for context in primitive.presentation_context_definition_list:
self.assertEqual(context.ID, 1)
self.assertEqual(context.AbstractSyntax, UID('1.2.840.10008.1.1'))
for syntax in context.TransferSyntax:
self.assertEqual(syntax, UID('1.2.840.10008.1.2'))
self.assertTrue(isinstance(primitive.application_context_name, UID))
self.assertTrue(isinstance(primitive.calling_ae_title, bytes))
self.assertTrue(isinstance(primitive.called_ae_title, bytes))
self.assertTrue(isinstance(primitive.user_information, list))
self.assertTrue(isinstance(primitive.presentation_context_definition_list, list))
# Not used by A-ASSOCIATE-RQ or fixed value
self.assertEqual(primitive.mode, "normal")
self.assertEqual(primitive.responding_ae_title, primitive.called_ae_title)
self.assertEqual(primitive.result, None)
self.assertEqual(primitive.result_source, None)
self.assertEqual(primitive.diagnostic, None)
self.assertEqual(primitive.calling_presentation_address, None)
self.assertEqual(primitive.called_presentation_address, None)
self.assertEqual(primitive.responding_presentation_address, primitive.called_presentation_address)
self.assertEqual(primitive.presentation_context_definition_results_list, [])
self.assertEqual(primitive.presentation_requirements, "Presentation Kernel")
self.assertEqual(primitive.session_requirements, "")
def test_from_primitive(self):
""" Check converting PDU to primitive """
orig_pdu = A_ASSOCIATE_RQ()
orig_pdu.Decode(a_associate_rq)
primitive = orig_pdu.ToParams()
new_pdu = A_ASSOCIATE_RQ()
new_pdu.FromParams(primitive)
self.assertEqual(new_pdu, orig_pdu)
def test_update_data(self):
""" Check that updating the PDU data works correctly """
orig_pdu = A_ASSOCIATE_RQ()
orig_pdu.Decode(a_associate_rq)
orig_pdu.user_information.user_data = [orig_pdu.user_information.user_data[1]]
orig_pdu.get_length()
primitive = orig_pdu.ToParams()
new_pdu = A_ASSOCIATE_RQ()
new_pdu.FromParams(primitive)
self.assertEqual(new_pdu, orig_pdu)
def test_generic_encode(self):
""" Check using the new pdu.encode produces the correct output """
pdu = A_ASSOCIATE_RQ()
pdu.Decode(a_associate_rq)
s = pdu.Encode()
t = pdu.encode()
self.assertEqual(s, t)
class TestPDU_A_ASSOC_RQ_ApplicationContext(unittest.TestCase):
def test_stream_decode_values_types(self):
""" Check decoding an assoc_rq produces the correct application context """
pdu = A_ASSOCIATE_RQ()
pdu.Decode(a_associate_rq)
app_context = pdu.variable_items[0]
self.assertEqual(app_context.item_type, 0x10)
self.assertEqual(app_context.item_length, 21)
self.assertEqual(app_context.application_context_name, '1.2.840.10008.3.1.1.1')
self.assertTrue(isinstance(app_context.item_type, int))
self.assertTrue(isinstance(app_context.item_length, int))
self.assertTrue(isinstance(app_context.application_context_name, UID))
class TestPDU_A_ASSOC_RQ_PresentationContext(unittest.TestCase):
def test_stream_decode_values_types(self):
""" Check decoding an assoc_rq produces the correct presentation context """
pdu = A_ASSOCIATE_RQ()
pdu.Decode(a_associate_rq)
# Check PresentationContextItemRQ attributes
presentation_context = pdu.variable_items[1]
self.assertEqual(presentation_context.item_type, 0x20)
self.assertEqual(presentation_context.presentation_context_id, 0x001)
self.assertEqual(presentation_context.item_length, 46)
self.assertTrue(isinstance(presentation_context.item_type, int))
self.assertTrue(isinstance(presentation_context.presentation_context_id, int))
self.assertTrue(isinstance(presentation_context.item_length, int))
def test_decode_properties(self):
""" Check decoding the stream produces the correct properties """
pdu = A_ASSOCIATE_RQ()
pdu.Decode(a_associate_rq)
context = pdu.presentation_context[0]
# Check ID property
context_id = context.ID
self.assertTrue(isinstance(context_id, int))
self.assertEqual(context_id, 1)
# Check Abstract Syntax property
context = pdu.presentation_context[0]
self.assertTrue(isinstance(context.abstract_syntax, UID))
self.assertEqual(context.abstract_syntax, UID('1.2.840.10008.1.1'))
# Check TransferSyntax property is a list
self.assertTrue(isinstance(context.transfer_syntax, list))
# Check TransferSyntax list contains transfer syntax type UIDs
for syntax in pdu.presentation_context[0].transfer_syntax:
self.assertTrue(isinstance(syntax, UID))
self.assertTrue(syntax.is_transfer_syntax)
# Check first transfer syntax is little endian implicit
syntax = pdu.presentation_context[0].transfer_syntax[0]
self.assertEqual(syntax, UID('1.2.840.10008.1.2'))
class TestPDU_A_ASSOC_RQ_PresentationContext_AbstractSyntax(unittest.TestCase):
def test_decode_value_type(self):
""" Check decoding an assoc_rq produces the correct abstract syntax """
pdu = A_ASSOCIATE_RQ()
pdu.Decode(a_associate_rq)
context = pdu.presentation_context[0]
abstract_syntax = context.abstract_transfer_syntax_sub_items[0]
self.assertEqual(abstract_syntax.item_type, 0x30)
self.assertEqual(abstract_syntax.item_length, 17)
self.assertEqual(abstract_syntax.abstract_syntax_name, UID('1.2.840.10008.1.1'))
self.assertTrue(isinstance(abstract_syntax.item_type, int))
self.assertTrue(isinstance(abstract_syntax.item_length, int))
self.assertTrue(isinstance(abstract_syntax.abstract_syntax_name, UID))
class TestPDU_A_ASSOC_RQ_PresentationContext_TransferSyntax(unittest.TestCase):
def test_decode_value_type(self):
""" Check decoding an assoc_rq produces the correct transfer syntax """
pdu = A_ASSOCIATE_RQ()
pdu.Decode(a_associate_rq)
context = pdu.presentation_context[0]
transfer_syntaxes = context.transfer_syntax
# Check TransferSyntax property is a list
self.assertTrue(isinstance(transfer_syntaxes, list))
# Check TransferSyntax list contains transfer syntax type UIDs
for syntax in transfer_syntaxes:
self.assertTrue(isinstance(syntax, UID))
self.assertTrue(syntax.is_transfer_syntax)
# Check first transfer syntax is little endian implicit
syntax = transfer_syntaxes[0]
self.assertEqual(syntax, UID('1.2.840.10008.1.2'))
class TestPDU_A_ASSOC_RQ_UserInformation(unittest.TestCase):
def test_decode_value_type(self):
""" Check decoding an assoc_rq produces the correct user information """
pdu = A_ASSOCIATE_RQ()
pdu.Decode(a_associate_rq)
user_info = pdu.variable_items[2]
self.assertEqual(user_info.item_type, 0x50)
self.assertEqual(user_info.item_length, 62)
self.assertTrue(isinstance(user_info.item_type, int))
self.assertTrue(isinstance(user_info.item_length, int))
self.assertTrue(isinstance(user_info.user_data, list))
# Test user items
for item in user_info.user_data:
# Maximum PDU Length (required)
if isinstance(item, MaximumLengthSubItem):
self.assertEqual(item.maximum_length_received, 16382)
self.assertEqual(user_info.maximum_length, 16382)
self.assertTrue(isinstance(item.maximum_length_received, int))
self.assertTrue(isinstance(user_info.maximum_length, int))
# Implementation Class UID (required)
elif isinstance(item, ImplementationClassUIDSubItem):
self.assertEqual(item.item_type, 0x52)
self.assertEqual(item.item_length, 32)
self.assertEqual(item.implementation_class_uid, UID('1.2.826.0.1.3680043.9.3811.0.9.0'))
self.assertTrue(isinstance(item.item_type, int))
self.assertTrue(isinstance(item.item_length, int))
self.assertTrue(isinstance(item.implementation_class_uid, UID))
# Implementation Version Name (optional)
elif isinstance(item, ImplementationVersionNameSubItem):
self.assertEqual(item.item_type, 0x55)
self.assertEqual(item.item_length, 14)
self.assertEqual(item.implementation_version_name, b'PYNETDICOM_090')
self.assertTrue(isinstance(item.item_type, int))
self.assertTrue(isinstance(item.item_length, int))
self.assertTrue(isinstance(item.implementation_version_name, bytes))
class TestPDU_A_ASSOC_AC(unittest.TestCase):
def test_property_setters(self):
"""Test the property setters"""
# presentation_context
pdu = A_ASSOCIATE_AC()
pdu.Decode(a_associate_ac)
role_selection = SCP_SCU_RoleSelectionSubItem()
role_selection.sop_class_uid = '1.2.840.10008.1.1'
role_selection.scu_role = 1
role_selection.scp_role = 1
pdu.user_information.user_data.append(role_selection)
context = pdu.presentation_context[0]
self.assertTrue(context.transfer_syntax == '1.2.840.10008.1.2')
def test_property_getters(self):
"""Test the property getters"""
# called_ae_title
pdu = A_ASSOCIATE_AC()
pdu.reserved_aet = b'TESTA'
self.assertEqual(pdu.called_ae_title, b'TESTA')
self.assertTrue(isinstance(pdu.called_ae_title, bytes))
pdu.reserved_aet = 'TESTB'
self.assertEqual(pdu.called_ae_title, b'TESTB')
self.assertTrue(isinstance(pdu.called_ae_title, bytes))
# calling_ae_title
pdu = A_ASSOCIATE_AC()
pdu.reserved_aec = b'TESTA'
self.assertEqual(pdu.calling_ae_title, b'TESTA')
self.assertTrue(isinstance(pdu.calling_ae_title, bytes))
pdu.reserved_aec = 'TESTB'
self.assertEqual(pdu.calling_ae_title, b'TESTB')
self.assertTrue(isinstance(pdu.calling_ae_title, bytes))
def test_string_output(self):
"""Test the string output"""
pdu = A_ASSOCIATE_AC()
pdu.Decode(a_associate_ac)
self.assertTrue("Implicit VR Little Endian" in pdu.__str__())
self.assertTrue("1.2.276.0.7230010" in pdu.__str__())
def test_stream_decode_values_types(self):
""" Check decoding the assoc_ac stream produces the correct objects """
pdu = A_ASSOCIATE_AC()
pdu.Decode(a_associate_ac)
self.assertEqual(pdu.pdu_type, 0x02)
self.assertEqual(pdu.pdu_length, 184)
self.assertEqual(pdu.protocol_version, 0x0001)
self.assertTrue(isinstance(pdu.pdu_type, int))
self.assertTrue(isinstance(pdu.pdu_length, int))
self.assertTrue(isinstance(pdu.protocol_version, int))
# Check VariableItems
# The actual items will be tested separately
self.assertTrue(isinstance(pdu.variable_items[0], ApplicationContextItem))
self.assertTrue(isinstance(pdu.variable_items[1], PresentationContextItemAC))
self.assertTrue(isinstance(pdu.variable_items[2], UserInformationItem))
def test_decode_properties(self):
""" Check decoding the assoc_ac stream produces the correct properties """
pdu = A_ASSOCIATE_AC()
pdu.Decode(a_associate_ac)
# Check AE titles
self.assertEqual(pdu.reserved_aec.decode('utf-8'), 'ECHOSCU ')
self.assertEqual(pdu.reserved_aet.decode('utf-8'), 'ANY-SCP ')
self.assertTrue(isinstance(pdu.reserved_aec, bytes))
self.assertTrue(isinstance(pdu.reserved_aet, bytes))
# Check application_context_name property
app_name = pdu.application_context_name
self.assertTrue(isinstance(app_name, UID))
self.assertEqual(app_name, '1.2.840.10008.3.1.1.1')
# Check presentation_context property
contexts = pdu.presentation_context
self.assertTrue(isinstance(contexts, list))
for context in contexts:
self.assertTrue(isinstance(context, PresentationContextItemAC))
# Check user_information property
user_info = pdu.user_information
self.assertTrue(isinstance(user_info, UserInformationItem))
def test_stream_encode(self):
""" Check encoding an assoc_ac produces the correct output """
pdu = A_ASSOCIATE_AC()
pdu.Decode(a_associate_ac)
s = pdu.Encode()
self.assertEqual(s, a_associate_ac)
def test_new_encode(self):
""" Check encoding using new generic method """
pdu = A_ASSOCIATE_AC()
pdu.Decode(a_associate_ac)
s = pdu.encode()
self.assertEqual(s, a_associate_ac)
def test_to_primitive(self):
""" Check converting PDU to primitive """
pdu = A_ASSOCIATE_AC()
pdu.Decode(a_associate_ac)
primitive = pdu.ToParams()
self.assertEqual(primitive.application_context_name, UID('1.2.840.10008.3.1.1.1'))
self.assertEqual(primitive.calling_ae_title, b'ECHOSCU ')
self.assertEqual(primitive.called_ae_title, b'ANY-SCP ')
# Test User Information
for item in primitive.user_information:
# Maximum PDU Length (required)
if isinstance(item, MaximumLengthNegotiation):
self.assertEqual(item.maximum_length_received, 16384)
self.assertTrue(isinstance(item.maximum_length_received, int))
# Implementation Class UID (required)
elif isinstance(item, ImplementationClassUIDNotification):
self.assertEqual(item.implementation_class_uid, UID('1.2.276.0.7230010.3.0.3.6.0'))
self.assertTrue(isinstance(item.implementation_class_uid, UID))
# Implementation Version Name (optional)
elif isinstance(item, ImplementationVersionNameNotification):
self.assertEqual(item.implementation_version_name, b'OFFIS_DCMTK_360')
self.assertTrue(isinstance(item.implementation_version_name, bytes))
# Test Presentation Contexts
for context in primitive.presentation_context_definition_list:
self.assertEqual(context.ID, 1)
self.assertEqual(context.TransferSyntax[0], UID('1.2.840.10008.1.2'))
self.assertTrue(isinstance(primitive.application_context_name, UID))
self.assertTrue(isinstance(primitive.calling_ae_title, bytes))
self.assertTrue(isinstance(primitive.called_ae_title, bytes))
self.assertTrue(isinstance(primitive.user_information, list))
self.assertEqual(primitive.result, 0)
self.assertEqual(len(primitive.presentation_context_definition_results_list), 1)
# Not used by A-ASSOCIATE-AC or fixed value
self.assertEqual(primitive.mode, "normal")
self.assertEqual(primitive.responding_ae_title, primitive.called_ae_title)
self.assertEqual(primitive.result_source, None)
self.assertEqual(primitive.diagnostic, None)
self.assertEqual(primitive.calling_presentation_address, None)
self.assertEqual(primitive.called_presentation_address, None)
self.assertEqual(primitive.responding_presentation_address, primitive.called_presentation_address)
self.assertEqual(primitive.presentation_context_definition_list, [])
self.assertEqual(primitive.presentation_requirements, "Presentation Kernel")
self.assertEqual(primitive.session_requirements, "")
def test_from_primitive(self):
""" Check converting PDU to primitive """
orig = A_ASSOCIATE_AC()
orig.Decode(a_associate_ac)
primitive = orig.ToParams()
new = A_ASSOCIATE_AC()
new.FromParams(primitive)
self.assertEqual(new, orig)
def test_update_data(self):
""" Check that updating the PDU data works correctly """
original = A_ASSOCIATE_AC()
original.Decode(a_associate_ac)
original.user_information.user_data = [original.user_information.user_data[1]]
original.get_length()
primitive = original.ToParams()
new = A_ASSOCIATE_AC()
new.FromParams(primitive)
self.assertEqual(original, new)
def test_generic_encode(self):
""" Check using the new pdu.encode produces the correct output """
pdu = A_ASSOCIATE_AC()
pdu.Decode(a_associate_ac)
s = pdu.Encode()
t = pdu.encode()
self.assertEqual(s, t)
class TestPDU_A_ASSOC_AC_ApplicationContext(unittest.TestCase):
def test_stream_decode_values_types(self):
""" Check decoding an assoc_ac produces the correct application context """
pdu = A_ASSOCIATE_AC()
pdu.Decode(a_associate_ac)
app_context = pdu.variable_items[0]
self.assertEqual(app_context.item_type, 0x10)
self.assertEqual(app_context.item_length, 21)
self.assertEqual(app_context.application_context_name, '1.2.840.10008.3.1.1.1')
self.assertTrue(isinstance(app_context.item_type, int))
self.assertTrue(isinstance(app_context.item_length, int))
self.assertTrue(isinstance(app_context.application_context_name, UID))
class TestPDU_A_ASSOC_AC_PresentationContext(unittest.TestCase):
def test_stream_decode_values_types(self):
""" Check decoding an assoc_ac produces the correct presentation context """
pdu = A_ASSOCIATE_AC()
pdu.Decode(a_associate_ac)
        # Check PresentationContextItemAC attributes
presentation_context = pdu.variable_items[1]
self.assertEqual(presentation_context.item_type, 0x21)
self.assertEqual(presentation_context.presentation_context_id, 0x0001)
self.assertEqual(presentation_context.item_length, 25)
self.assertEqual(presentation_context.result_reason, 0)
self.assertTrue(isinstance(presentation_context.item_type, int))
self.assertTrue(isinstance(presentation_context.presentation_context_id, int))
self.assertTrue(isinstance(presentation_context.item_length, int))
def test_decode_properties(self):
""" Check decoding the stream produces the correct properties """
pdu = A_ASSOCIATE_AC()
pdu.Decode(a_associate_ac)
context = pdu.presentation_context[0]
# Check ID property
context_id = context.ID
self.assertTrue(isinstance(context_id, int))
self.assertEqual(context_id, 1)
# Check Result
result = pdu.presentation_context[0].result_reason
self.assertEqual(result, 0)
self.assertTrue(isinstance(result, int))
# Check transfer syntax
syntax = pdu.presentation_context[0].transfer_syntax
self.assertTrue(syntax.is_transfer_syntax)
self.assertTrue(isinstance(syntax, UID))
self.assertEqual(syntax, UID('1.2.840.10008.1.2'))
class TestPDU_A_ASSOC_AC_PresentationContext_TransferSyntax(unittest.TestCase):
def test_decode_value_type(self):
""" Check decoding an assoc_ac produces the correct transfer syntax """
pdu = A_ASSOCIATE_AC()
pdu.Decode(a_associate_ac)
context = pdu.presentation_context[0]
syntax = context.transfer_syntax
self.assertTrue(isinstance(syntax, UID))
self.assertTrue(syntax.is_transfer_syntax)
self.assertEqual(syntax, UID('1.2.840.10008.1.2'))
class TestPDU_A_ASSOC_AC_UserInformation(unittest.TestCase):
def test_decode_value_type(self):
""" Check decoding an assoc_rq produces the correct user information """
pdu = A_ASSOCIATE_AC()
pdu.Decode(a_associate_ac)
user_info = pdu.variable_items[2]
self.assertEqual(user_info.item_type, 0x50)
self.assertEqual(user_info.item_length, 58)
self.assertTrue(isinstance(user_info.item_type, int))
self.assertTrue(isinstance(user_info.item_length, int))
self.assertTrue(isinstance(user_info.user_data, list))
# Test user items
for item in user_info.user_data:
# Maximum PDU Length (required)
if isinstance(item, MaximumLengthSubItem):
self.assertEqual(item.maximum_length_received, 16384)
self.assertEqual(user_info.maximum_length, 16384)
self.assertTrue(isinstance(item.maximum_length_received, int))
self.assertTrue(isinstance(user_info.maximum_length, int))
# Implementation Class UID (required)
elif isinstance(item, ImplementationClassUIDSubItem):
self.assertEqual(item.item_type, 0x52)
self.assertEqual(item.item_length, 27)
self.assertEqual(item.implementation_class_uid, UID('1.2.276.0.7230010.3.0.3.6.0'))
self.assertTrue(isinstance(item.item_type, int))
self.assertTrue(isinstance(item.item_length, int))
self.assertTrue(isinstance(item.implementation_class_uid, UID))
# Implementation Version Name (optional)
elif isinstance(item, ImplementationVersionNameSubItem):
self.assertEqual(item.item_type, 0x55)
self.assertEqual(item.item_length, 15)
self.assertEqual(item.implementation_version_name, b'OFFIS_DCMTK_360')
self.assertTrue(isinstance(item.item_type, int))
self.assertTrue(isinstance(item.item_length, int))
self.assertTrue(isinstance(item.implementation_version_name, bytes))
class TestPDU_A_ASSOC_RJ(unittest.TestCase):
def test_string_output(self):
"""Test the string output"""
pdu = A_ASSOCIATE_RJ()
pdu.Decode(a_associate_rj)
self.assertTrue("Rejected (Permanent)" in pdu.__str__())
self.assertTrue("DUL service-user" in pdu.__str__())
def test_stream_decode_values_types(self):
""" Check decoding the assoc_rj stream produces the correct objects """
pdu = A_ASSOCIATE_RJ()
pdu.Decode(a_associate_rj)
self.assertEqual(pdu.pdu_type, 0x03)
self.assertEqual(pdu.pdu_length, 4)
self.assertTrue(isinstance(pdu.pdu_type, int))
self.assertTrue(isinstance(pdu.pdu_length, int))
def test_decode_properties(self):
""" Check decoding the assoc_rj stream produces the correct properties """
pdu = A_ASSOCIATE_RJ()
pdu.Decode(a_associate_rj)
# Check reason/source/result
self.assertEqual(pdu.result, 1)
self.assertEqual(pdu.reason_diagnostic, 1)
self.assertEqual(pdu.source, 1)
self.assertTrue(isinstance(pdu.result, int))
self.assertTrue(isinstance(pdu.reason_diagnostic, int))
self.assertTrue(isinstance(pdu.source, int))
def test_stream_encode(self):
""" Check encoding an assoc_rj produces the correct output """
pdu = A_ASSOCIATE_RJ()
pdu.Decode(a_associate_rj)
s = pdu.Encode()
self.assertEqual(s, a_associate_rj)
def test_new_encode(self):
""" Check encoding using new generic method """
pdu = A_ASSOCIATE_RJ()
pdu.Decode(a_associate_rj)
s = pdu.encode()
self.assertEqual(s, a_associate_rj)
def test_to_primitive(self):
""" Check converting PDU to primitive """
pdu = A_ASSOCIATE_RJ()
pdu.Decode(a_associate_rj)
primitive = pdu.ToParams()
self.assertEqual(primitive.result, 1)
self.assertEqual(primitive.result_source, 1)
self.assertEqual(primitive.diagnostic, 1)
self.assertTrue(isinstance(primitive.result, int))
self.assertTrue(isinstance(primitive.result_source, int))
self.assertTrue(isinstance(primitive.diagnostic, int))
# Not used by A-ASSOCIATE-RJ or fixed value
self.assertEqual(primitive.mode, "normal")
self.assertEqual(primitive.application_context_name, None)
self.assertEqual(primitive.calling_ae_title, None)
self.assertEqual(primitive.called_ae_title, None)
self.assertEqual(primitive.responding_ae_title, None)
self.assertEqual(primitive.user_information, [])
self.assertEqual(primitive.calling_presentation_address, None)
self.assertEqual(primitive.called_presentation_address, None)
self.assertEqual(primitive.responding_presentation_address, primitive.called_presentation_address)
self.assertEqual(primitive.presentation_context_definition_list, [])
self.assertEqual(primitive.presentation_context_definition_results_list, [])
self.assertEqual(primitive.presentation_requirements, "Presentation Kernel")
self.assertEqual(primitive.session_requirements, "")
def test_from_primitive(self):
""" Check converting PDU to primitive """
orig_pdu = A_ASSOCIATE_RJ()
orig_pdu.Decode(a_associate_rj)
primitive = orig_pdu.ToParams()
new_pdu = A_ASSOCIATE_RJ()
new_pdu.FromParams(primitive)
self.assertEqual(new_pdu, orig_pdu)
def test_update_data(self):
""" Check that updating the PDU data works correctly """
orig_pdu = A_ASSOCIATE_RJ()
orig_pdu.Decode(a_associate_rj)
orig_pdu.source = 2
orig_pdu.reason_diagnostic = 2
orig_pdu.result = 2
orig_pdu.get_length()
primitive = orig_pdu.ToParams()
new_pdu = A_ASSOCIATE_RJ()
new_pdu.FromParams(primitive)
self.assertEqual(new_pdu, orig_pdu)
def test_result_str(self):
""" Check the result str returns correct values """
pdu = A_ASSOCIATE_RJ()
pdu.Decode(a_associate_rj)
pdu.result = 0
with self.assertRaises(ValueError): pdu.result_str
pdu.result = 1
self.assertEqual(pdu.result_str, 'Rejected (Permanent)')
pdu.result = 2
self.assertEqual(pdu.result_str, 'Rejected (Transient)')
pdu.result = 3
with self.assertRaises(ValueError): pdu.result_str
def test_source_str(self):
""" Check the source str returns correct values """
pdu = A_ASSOCIATE_RJ()
pdu.Decode(a_associate_rj)
pdu.source = 0
with self.assertRaises(ValueError): pdu.source_str
pdu.source = 1
self.assertEqual(pdu.source_str, 'DUL service-user')
pdu.source = 2
self.assertEqual(pdu.source_str, 'DUL service-provider (ACSE related)')
pdu.source = 3
self.assertEqual(pdu.source_str, 'DUL service-provider (presentation related)')
pdu.source = 4
with self.assertRaises(ValueError): pdu.source_str
def test_reason_str(self):
""" Check the reason str returns correct values """
pdu = A_ASSOCIATE_RJ()
pdu.Decode(a_associate_rj)
pdu.source = 0
with self.assertRaises(ValueError): pdu.reason_str
pdu.source = 1
for ii in range(1, 11):
pdu.reason_diagnostic = ii
self.assertTrue(isinstance(pdu.reason_str, str))
pdu.reason_diagnostic = 11
with self.assertRaises(ValueError): pdu.reason_str
pdu.source = 2
for ii in range(1, 3):
pdu.reason_diagnostic = ii
self.assertTrue(isinstance(pdu.reason_str, str))
pdu.reason_diagnostic = 3
with self.assertRaises(ValueError): pdu.reason_str
pdu.source = 3
for ii in range(1, 8):
pdu.reason_diagnostic = ii
self.assertTrue(isinstance(pdu.reason_str, str))
pdu.reason_diagnostic = 8
with self.assertRaises(ValueError): pdu.reason_str
pdu.source = 4
with self.assertRaises(ValueError): pdu.reason_str
def test_generic_encode(self):
""" Check using the new pdu.encode produces the correct output """
pdu = A_ASSOCIATE_RJ()
pdu.Decode(a_associate_rj)
s = pdu.Encode()
t = pdu.encode()
self.assertEqual(s, t)
class TestPDU_P_DATA_TF(unittest.TestCase):
def test_string_output(self):
"""Test the string output"""
pdu = P_DATA_TF()
pdu.Decode(p_data_tf)
self.assertTrue("80 bytes" in pdu.__str__())
self.assertTrue("0x03 0x00" in pdu.__str__())
def test_stream_decode_values_types(self):
""" Check decoding the p_data stream produces the correct objects """
pdu = P_DATA_TF()
pdu.Decode(p_data_tf)
self.assertEqual(pdu.pdu_type, 0x04)
self.assertEqual(pdu.pdu_length, 84)
self.assertTrue(isinstance(pdu.pdu_type, int))
self.assertTrue(isinstance(pdu.pdu_length, int))
def test_decode_properties(self):
""" Check decoding the p_data stream produces the correct properties """
pdu = P_DATA_TF()
pdu.Decode(p_data_tf)
# Check PDVs
self.assertTrue(isinstance(pdu.PDVs, list))
self.assertEqual(pdu.get_length(), 90)
def test_stream_encode(self):
""" Check encoding an p_data produces the correct output """
pdu = P_DATA_TF()
pdu.Decode(p_data_tf)
s = pdu.Encode()
self.assertEqual(s, p_data_tf)
def test_new_encode(self):
""" Check encoding using new generic method """
pdu = P_DATA_TF()
pdu.Decode(p_data_tf)
s = pdu.encode()
self.assertEqual(s, p_data_tf)
def test_to_primitive(self):
""" Check converting PDU to primitive """
pdu = P_DATA_TF()
pdu.Decode(p_data_tf)
primitive = pdu.ToParams()
self.assertEqual(primitive.presentation_data_value_list, [[1, p_data_tf[11:]]])
self.assertTrue(isinstance(primitive.presentation_data_value_list, list))
def test_from_primitive(self):
""" Check converting PDU to primitive """
orig_pdu = P_DATA_TF()
orig_pdu.Decode(p_data_tf)
primitive = orig_pdu.ToParams()
new_pdu = P_DATA_TF()
new_pdu.FromParams(primitive)
self.assertEqual(new_pdu, orig_pdu)
def test_generic_encode(self):
""" Check using the new pdu.encode produces the correct output """
pdu = P_DATA_TF()
pdu.Decode(p_data_tf)
s = pdu.Encode()
t = pdu.encode()
self.assertEqual(s, t)
class TestPDU_A_RELEASE_RQ(unittest.TestCase):
def test_string_output(self):
"""Test the string output"""
pdu = A_RELEASE_RQ()
pdu.Decode(a_release_rq)
self.assertTrue("0x05" in pdu.__str__())
self.assertTrue("10 bytes" in pdu.__str__())
def test_stream_decode_values_types(self):
""" Check decoding the release_rq stream produces the correct objects """
pdu = A_RELEASE_RQ()
pdu.Decode(a_release_rq)
self.assertEqual(pdu.pdu_type, 0x05)
self.assertEqual(pdu.pdu_length, 4)
self.assertEqual(pdu.get_length(), 10)
self.assertTrue(isinstance(pdu.pdu_type, int))
self.assertTrue(isinstance(pdu.pdu_length, int))
def test_stream_encode(self):
""" Check encoding an release_rq produces the correct output """
pdu = A_RELEASE_RQ()
pdu.Decode(a_release_rq)
s = pdu.Encode()
self.assertEqual(s, a_release_rq)
def test_new_encode(self):
""" Check encoding using new generic method """
pdu = A_RELEASE_RQ()
pdu.Decode(a_release_rq)
s = pdu.encode()
self.assertEqual(s, a_release_rq)
def test_to_primitive(self):
""" Check converting PDU to primitive """
pdu = A_RELEASE_RQ()
pdu.Decode(a_release_rq)
primitive = pdu.ToParams()
self.assertEqual(primitive.reason, "normal")
self.assertEqual(primitive.result, None)
def test_from_primitive(self):
""" Check converting PDU to primitive """
orig_pdu = A_RELEASE_RQ()
orig_pdu.Decode(a_release_rq)
primitive = orig_pdu.ToParams()
new_pdu = A_RELEASE_RQ()
new_pdu.FromParams(primitive)
self.assertEqual(new_pdu, orig_pdu)
def test_generic_encode(self):
""" Check using the new pdu.encode produces the correct output """
pdu = A_RELEASE_RQ()
pdu.Decode(a_release_rq)
s = pdu.Encode()
t = pdu.encode()
self.assertEqual(s, t)
class TestPDU_A_RELEASE_RP(unittest.TestCase):
def test_string_output(self):
"""Test the string output"""
pdu = A_RELEASE_RP()
pdu.Decode(a_release_rp)
self.assertTrue("0x06" in pdu.__str__())
self.assertTrue("10 bytes" in pdu.__str__())
def test_stream_decode_values_types(self):
""" Check decoding the release_rp stream produces the correct objects """
pdu = A_RELEASE_RP()
pdu.Decode(a_release_rp)
self.assertEqual(pdu.pdu_type, 0x06)
self.assertEqual(pdu.pdu_length, 4)
self.assertEqual(pdu.get_length(), 10)
self.assertTrue(isinstance(pdu.pdu_type, int))
self.assertTrue(isinstance(pdu.pdu_length, int))
def test_stream_encode(self):
""" Check encoding an release_rp produces the correct output """
pdu = A_RELEASE_RP()
pdu.Decode(a_release_rp)
s = pdu.Encode()
self.assertEqual(s, a_release_rp)
def test_new_encode(self):
""" Check encoding using new generic method """
pdu = A_RELEASE_RP()
pdu.Decode(a_release_rp)
s = pdu.encode()
self.assertEqual(s, a_release_rp)
def test_to_primitive(self):
""" Check converting PDU to primitive """
pdu = A_RELEASE_RP()
pdu.Decode(a_release_rp)
primitive = pdu.ToParams()
self.assertEqual(primitive.reason, "normal")
self.assertEqual(primitive.result, "affirmative")
def test_from_primitive(self):
""" Check converting PDU to primitive """
orig_pdu = A_RELEASE_RP()
orig_pdu.Decode(a_release_rp)
primitive = orig_pdu.ToParams()
new_pdu = A_RELEASE_RP()
new_pdu.FromParams(primitive)
self.assertEqual(new_pdu, orig_pdu)
def test_generic_encode(self):
""" Check using the new pdu.encode produces the correct output """
pdu = A_RELEASE_RP()
pdu.Decode(a_release_rp)
s = pdu.Encode()
t = pdu.encode()
self.assertEqual(s, t)
class TestPDU_A_ABORT(unittest.TestCase):
def test_string_output(self):
"""Test the string output"""
pdu = A_ABORT_RQ()
pdu.Decode(a_abort)
self.assertTrue("0x07" in pdu.__str__())
self.assertTrue("4 bytes" in pdu.__str__())
self.assertTrue("DUL service-user" in pdu.__str__())
def test_a_abort_stream_decode_values_types(self):
""" Check decoding the a_abort stream produces the correct objects """
pdu = A_ABORT_RQ()
pdu.Decode(a_abort)
self.assertEqual(pdu.pdu_type, 0x07)
self.assertEqual(pdu.pdu_length, 4)
self.assertEqual(pdu.source, 0)
self.assertEqual(pdu.reason_diagnostic, 0)
self.assertEqual(pdu.get_length(), 10)
self.assertTrue(isinstance(pdu.pdu_type, int))
self.assertTrue(isinstance(pdu.pdu_length, int))
self.assertTrue(isinstance(pdu.source, int))
self.assertTrue(isinstance(pdu.reason_diagnostic, int))
def test_a_p_abort_stream_decode_values_types(self):
""" Check decoding the a_abort stream produces the correct objects """
pdu = A_ABORT_RQ()
pdu.Decode(a_p_abort)
self.assertEqual(pdu.pdu_type, 0x07)
self.assertEqual(pdu.pdu_length, 4)
self.assertEqual(pdu.source, 2)
self.assertEqual(pdu.reason_diagnostic, 4)
self.assertTrue(isinstance(pdu.pdu_type, int))
self.assertTrue(isinstance(pdu.pdu_length, int))
self.assertTrue(isinstance(pdu.source, int))
self.assertTrue(isinstance(pdu.reason_diagnostic, int))
def test_a_abort_stream_encode(self):
""" Check encoding an a_abort produces the correct output """
pdu = A_ABORT_RQ()
pdu.Decode(a_abort)
s = pdu.Encode()
self.assertEqual(s, a_abort)
def test_new_encode_a_abort(self):
""" Check encoding using new generic method """
pdu = A_ABORT_RQ()
pdu.Decode(a_abort)
s = pdu.encode()
self.assertEqual(s, a_abort)
def test_a_p_abort_stream_encode(self):
""" Check encoding an a_abort produces the correct output """
pdu = A_ABORT_RQ()
pdu.Decode(a_p_abort)
s = pdu.Encode()
self.assertEqual(s, a_p_abort)
def test_new_encode_a_p_abort(self):
""" Check encoding using new generic method """
pdu = A_ABORT_RQ()
pdu.Decode(a_p_abort)
s = pdu.encode()
self.assertEqual(s, a_p_abort)
def test_to_a_abort_primitive(self):
""" Check converting PDU to a_abort primitive """
pdu = A_ABORT_RQ()
pdu.Decode(a_abort)
primitive = pdu.ToParams()
self.assertTrue(isinstance(primitive, A_ABORT))
self.assertEqual(primitive.abort_source, 0)
def test_to_a_p_abort_primitive(self):
""" Check converting PDU to a_p_abort primitive """
pdu = A_ABORT_RQ()
pdu.Decode(a_p_abort)
primitive = pdu.ToParams()
self.assertTrue(isinstance(primitive, A_P_ABORT))
self.assertEqual(primitive.provider_reason, 4)
def test_a_abort_from_primitive(self):
""" Check converting PDU to primitive """
orig_pdu = A_ABORT_RQ()
orig_pdu.Decode(a_abort)
primitive = orig_pdu.ToParams()
new_pdu = A_ABORT_RQ()
new_pdu.FromParams(primitive)
self.assertEqual(new_pdu, orig_pdu)
def test_a_p_abort_from_primitive(self):
""" Check converting PDU to primitive """
orig_pdu = A_ABORT_RQ()
orig_pdu.Decode(a_p_abort)
primitive = orig_pdu.ToParams()
new_pdu = A_ABORT_RQ()
new_pdu.FromParams(primitive)
self.assertEqual(new_pdu, orig_pdu)
def test_source_str(self):
""" Check the source str returns correct values """
pdu = A_ABORT_RQ()
pdu.Decode(a_abort)
pdu.source = 0
self.assertEqual(pdu.source_str, 'DUL service-user')
pdu.source = 2
self.assertEqual(pdu.source_str, 'DUL service-provider')
def test_reason_str(self):
""" Check the reaspm str returns correct values """
pdu = A_ABORT_RQ()
pdu.Decode(a_abort)
pdu.source = 2
pdu.reason_diagnostic = 0
self.assertEqual(pdu.reason_str, "No reason given")
pdu.reason_diagnostic = 1
self.assertEqual(pdu.reason_str, "Unrecognised PDU")
pdu.reason_diagnostic = 2
self.assertEqual(pdu.reason_str, "Unexpected PDU")
pdu.reason_diagnostic = 3
self.assertEqual(pdu.reason_str, "Reserved")
pdu.reason_diagnostic = 4
self.assertEqual(pdu.reason_str, "Unrecognised PDU parameter")
pdu.reason_diagnostic = 5
self.assertEqual(pdu.reason_str, "Unexpected PDU parameter")
pdu.reason_diagnostic = 6
self.assertEqual(pdu.reason_str, "Invalid PDU parameter value")
def test_generic_encode(self):
""" Check using the new pdu.encode produces the correct output """
pdu = A_ABORT_RQ()
pdu.Decode(a_abort)
s = pdu.Encode()
t = pdu.encode()
self.assertEqual(s, t)
if __name__ == "__main__":
unittest.main()
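# --- Editor's sketch (not part of the original test suite) -----------------
# Every PDU class above is exercised with the same round trip. This sketch
# names that loop explicitly, using only methods the suite itself calls
# (Decode, ToParams, FromParams, Encode):
def roundtrip(pdu_class, stream):
    original = pdu_class()
    original.Decode(stream)            # bytes -> PDU
    primitive = original.ToParams()    # PDU -> service primitive
    rebuilt = pdu_class()
    rebuilt.FromParams(primitive)      # primitive -> PDU
    return rebuilt.Encode() == stream  # PDU -> bytes, compared to the input

# e.g. roundtrip(A_ASSOCIATE_RQ, a_associate_rq) should hold, mirroring
# test_stream_encode and test_from_primitive above.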
[metadata row: per-file quality-signal columns omitted]
next record: hexsha: 81b8d298757a9128a734fc2be98a15ac23595bd2 | size: 14057 | ext: py | lang: Python
path: req_update/tests/test_req_update.py | repo: albertyw/pip-update | head: 27d5ab1da54ce054e3319060c66aec35e8f0b6c2
licenses: ["MIT"] | star/issue/fork counts and event dates: null
from __future__ import annotations
import argparse
import io
import sys
from typing import List
import unittest
from unittest.mock import MagicMock, patch
from req_update import req_update
PIP_OUTDATED = [
{"name": "varsnap", "version": "1.0.0", "latest_version": "1.2.3"}
]
class TestMain(unittest.TestCase):
def test_main(self) -> None:
with patch("req_update.req_update.ReqUpdate") as mock_req_update:
req_update.main()
self.assertTrue(mock_req_update().main.called)
class TestReqUpdateMain(unittest.TestCase):
def setUp(self) -> None:
self.req_update = req_update.ReqUpdate()
self.mock_get_args = MagicMock()
setattr(self.req_update, "get_args", self.mock_get_args)
self.mock_check = MagicMock()
setattr(
self.req_update.util,
"check_repository_cleanliness",
self.mock_check,
)
self.mock_create_branch = MagicMock()
setattr(self.req_update.util, "create_branch", self.mock_create_branch)
self.mock_python_applicable = MagicMock()
setattr(
self.req_update.python,
"check_applicable",
self.mock_python_applicable,
)
self.mock_python_update = MagicMock()
setattr(
self.req_update.python,
"update_dependencies",
self.mock_python_update,
)
self.mock_node_applicable = MagicMock()
setattr(
self.req_update.node, "check_applicable", self.mock_node_applicable
)
self.mock_node_update = MagicMock()
setattr(
self.req_update.node,
"update_dependencies",
self.mock_node_update,
)
self.mock_go_applicable = MagicMock()
setattr(
self.req_update.go, "check_applicable", self.mock_go_applicable
)
self.mock_go_update = MagicMock()
setattr(
self.req_update.go,
"update_dependencies",
self.mock_go_update,
)
self.mock_rollback = MagicMock()
setattr(self.req_update.util, "rollback_branch", self.mock_rollback)
def test_main_no_applicable(self) -> None:
self.mock_python_applicable.return_value = False
self.mock_node_applicable.return_value = False
self.mock_go_applicable.return_value = False
updated = self.req_update.main()
self.assertFalse(updated)
self.assertTrue(self.mock_get_args.called)
self.assertTrue(self.mock_check.called)
self.assertTrue(self.mock_python_applicable.called)
self.assertFalse(self.mock_python_update.called)
self.assertTrue(self.mock_node_applicable.called)
self.assertFalse(self.mock_node_update.called)
self.assertTrue(self.mock_go_applicable.called)
self.assertFalse(self.mock_go_update.called)
self.assertFalse(self.mock_create_branch.called)
self.assertFalse(self.mock_rollback.called)
def test_main_python_applicable_no_update(self) -> None:
self.mock_python_applicable.return_value = True
self.mock_python_update.return_value = False
self.mock_node_applicable.return_value = False
self.mock_go_applicable.return_value = False
updated = self.req_update.main()
self.assertFalse(updated)
self.assertTrue(self.mock_get_args.called)
self.assertTrue(self.mock_check.called)
self.assertTrue(self.mock_python_applicable.called)
self.assertTrue(self.mock_python_update.called)
self.assertTrue(self.mock_node_applicable.called)
self.assertFalse(self.mock_node_update.called)
self.assertTrue(self.mock_go_applicable.called)
self.assertFalse(self.mock_go_update.called)
self.assertTrue(self.mock_create_branch.called)
self.assertTrue(self.mock_rollback.called)
def test_main_python_applicable_update(self) -> None:
self.mock_python_applicable.return_value = True
self.mock_python_update.return_value = True
self.mock_node_applicable.return_value = False
self.mock_go_applicable.return_value = False
updated = self.req_update.main()
self.assertTrue(updated)
self.assertTrue(self.mock_get_args.called)
self.assertTrue(self.mock_check.called)
self.assertTrue(self.mock_python_applicable.called)
self.assertTrue(self.mock_python_update.called)
self.assertTrue(self.mock_node_applicable.called)
self.assertFalse(self.mock_node_update.called)
self.assertTrue(self.mock_go_applicable.called)
self.assertFalse(self.mock_go_update.called)
self.assertTrue(self.mock_create_branch.called)
self.assertFalse(self.mock_rollback.called)
def test_main_node_applicable_no_update(self) -> None:
self.mock_python_applicable.return_value = False
self.mock_node_applicable.return_value = True
self.mock_node_update.return_value = False
self.mock_go_applicable.return_value = False
updated = self.req_update.main()
self.assertFalse(updated)
self.assertTrue(self.mock_get_args.called)
self.assertTrue(self.mock_check.called)
self.assertTrue(self.mock_python_applicable.called)
self.assertFalse(self.mock_python_update.called)
self.assertTrue(self.mock_node_applicable.called)
self.assertTrue(self.mock_node_update.called)
self.assertTrue(self.mock_go_applicable.called)
self.assertFalse(self.mock_go_update.called)
self.assertTrue(self.mock_create_branch.called)
self.assertTrue(self.mock_rollback.called)
def test_main_node_applicable_update(self) -> None:
self.mock_python_applicable.return_value = False
self.mock_node_applicable.return_value = True
self.mock_node_update.return_value = True
self.mock_go_applicable.return_value = False
updated = self.req_update.main()
self.assertTrue(updated)
self.assertTrue(self.mock_get_args.called)
self.assertTrue(self.mock_check.called)
self.assertTrue(self.mock_python_applicable.called)
self.assertFalse(self.mock_python_update.called)
self.assertTrue(self.mock_node_applicable.called)
self.assertTrue(self.mock_node_update.called)
self.assertTrue(self.mock_go_applicable.called)
self.assertFalse(self.mock_go_update.called)
self.assertTrue(self.mock_create_branch.called)
self.assertFalse(self.mock_rollback.called)
def test_main_go_applicable_no_update(self) -> None:
self.mock_python_applicable.return_value = False
self.mock_node_applicable.return_value = False
self.mock_go_applicable.return_value = True
self.mock_go_update.return_value = False
updated = self.req_update.main()
self.assertFalse(updated)
self.assertTrue(self.mock_get_args.called)
self.assertTrue(self.mock_check.called)
self.assertTrue(self.mock_python_applicable.called)
self.assertFalse(self.mock_python_update.called)
self.assertTrue(self.mock_node_applicable.called)
self.assertFalse(self.mock_node_update.called)
self.assertTrue(self.mock_go_applicable.called)
self.assertTrue(self.mock_go_update.called)
self.assertTrue(self.mock_create_branch.called)
self.assertTrue(self.mock_rollback.called)
def test_main_go_applicable_update(self) -> None:
self.mock_python_applicable.return_value = False
self.mock_node_applicable.return_value = False
self.mock_go_applicable.return_value = True
self.mock_go_update.return_value = True
updated = self.req_update.main()
self.assertTrue(updated)
self.assertTrue(self.mock_get_args.called)
self.assertTrue(self.mock_check.called)
self.assertTrue(self.mock_python_applicable.called)
self.assertFalse(self.mock_python_update.called)
self.assertTrue(self.mock_node_applicable.called)
self.assertFalse(self.mock_node_update.called)
self.assertTrue(self.mock_go_applicable.called)
self.assertTrue(self.mock_go_update.called)
self.assertTrue(self.mock_create_branch.called)
self.assertFalse(self.mock_rollback.called)
def test_main_all_applicable_no_update(self) -> None:
self.mock_python_applicable.return_value = True
self.mock_python_update.return_value = False
self.mock_node_applicable.return_value = True
self.mock_node_update.return_value = False
self.mock_go_applicable.return_value = True
self.mock_go_update.return_value = False
updated = self.req_update.main()
self.assertFalse(updated)
self.assertTrue(self.mock_get_args.called)
self.assertTrue(self.mock_check.called)
self.assertTrue(self.mock_python_applicable.called)
self.assertTrue(self.mock_python_update.called)
self.assertTrue(self.mock_node_applicable.called)
self.assertTrue(self.mock_node_update.called)
self.assertTrue(self.mock_go_applicable.called)
self.assertTrue(self.mock_go_update.called)
self.assertTrue(self.mock_create_branch.called)
self.assertTrue(self.mock_rollback.called)
def test_main_all_applicable_python_update(self) -> None:
self.mock_python_applicable.return_value = True
self.mock_python_update.return_value = True
self.mock_node_applicable.return_value = True
self.mock_node_update.return_value = False
self.mock_go_applicable.return_value = True
self.mock_go_update.return_value = False
updated = self.req_update.main()
self.assertTrue(updated)
self.assertTrue(self.mock_get_args.called)
self.assertTrue(self.mock_check.called)
self.assertTrue(self.mock_python_applicable.called)
self.assertTrue(self.mock_python_update.called)
self.assertTrue(self.mock_node_applicable.called)
self.assertTrue(self.mock_node_update.called)
self.assertTrue(self.mock_go_applicable.called)
self.assertTrue(self.mock_go_update.called)
self.assertTrue(self.mock_create_branch.called)
self.assertFalse(self.mock_rollback.called)
def test_main_all_applicable_node_update(self) -> None:
self.mock_python_applicable.return_value = True
self.mock_python_update.return_value = False
self.mock_node_applicable.return_value = True
self.mock_node_update.return_value = True
self.mock_go_applicable.return_value = True
self.mock_go_update.return_value = False
updated = self.req_update.main()
self.assertTrue(updated)
self.assertTrue(self.mock_get_args.called)
self.assertTrue(self.mock_check.called)
self.assertTrue(self.mock_python_applicable.called)
self.assertTrue(self.mock_python_update.called)
self.assertTrue(self.mock_node_applicable.called)
self.assertTrue(self.mock_node_update.called)
self.assertTrue(self.mock_go_applicable.called)
self.assertTrue(self.mock_go_update.called)
self.assertTrue(self.mock_create_branch.called)
self.assertFalse(self.mock_rollback.called)
def test_main_all_applicable_go_update(self) -> None:
self.mock_python_applicable.return_value = True
self.mock_python_update.return_value = False
self.mock_node_applicable.return_value = True
self.mock_node_update.return_value = False
self.mock_go_applicable.return_value = True
self.mock_go_update.return_value = True
updated = self.req_update.main()
self.assertTrue(updated)
self.assertTrue(self.mock_get_args.called)
self.assertTrue(self.mock_check.called)
self.assertTrue(self.mock_python_applicable.called)
self.assertTrue(self.mock_python_update.called)
self.assertTrue(self.mock_node_applicable.called)
self.assertTrue(self.mock_node_update.called)
self.assertTrue(self.mock_go_applicable.called)
self.assertTrue(self.mock_go_update.called)
self.assertTrue(self.mock_create_branch.called)
self.assertFalse(self.mock_rollback.called)
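# --- Editor's sketch (inferred from the mock matrix above, not taken from
# req_update's source) of the control flow these tests pin down: every
# applicable language is updated, a branch is created before the first
# update, and the branch is rolled back only when nothing was updated.
def main_flow(langs, create_branch, rollback_branch):
    """langs: iterable of (check_applicable, update_dependencies) callables."""
    branched = False
    updated = False
    for check_applicable, update_dependencies in langs:
        if check_applicable():
            if not branched:
                create_branch()
                branched = True
            updated = update_dependencies() or updated
    if branched and not updated:
        rollback_branch()
    return updated

# One applicable language whose update fails should branch, then roll back,
# matching test_main_python_applicable_no_update:
_calls = []
assert main_flow([(lambda: True, lambda: False)],
                 lambda: _calls.append('branch'),
                 lambda: _calls.append('rollback')) is False
assert _calls == ['branch', 'rollback']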
class TestGetArgs(unittest.TestCase):
def setUp(self) -> None:
self.req_update = req_update.ReqUpdate()
def get_args_with_argv(self, argv: List[str]) -> argparse.Namespace:
argv = ["req_update.py"] + argv
with patch.object(sys, "argv", argv):
args = self.req_update.get_args()
return args
def test_none(self) -> None:
args = self.get_args_with_argv([])
self.assertFalse(args.verbose)
def test_push(self) -> None:
self.assertFalse(self.req_update.util.push)
args = self.get_args_with_argv([])
self.assertFalse(args.push)
args = self.get_args_with_argv(["--push"])
self.assertTrue(args.push)
args = self.get_args_with_argv(["-p"])
self.assertTrue(args.push)
self.assertTrue(self.req_update.util.push)
def test_dryrun(self) -> None:
self.assertTrue(self.req_update.util.dry_run)
args = self.get_args_with_argv([])
self.assertFalse(args.dryrun)
args = self.get_args_with_argv(["--dryrun"])
self.assertTrue(args.dryrun)
args = self.get_args_with_argv(["-d"])
self.assertTrue(args.dryrun)
self.assertTrue(self.req_update.util.dry_run)
def test_verbose(self) -> None:
args = self.get_args_with_argv(["--verbose"])
self.assertTrue(args.verbose)
args = self.get_args_with_argv(["-v"])
self.assertTrue(args.verbose)
def test_version(self) -> None:
with patch("sys.stdout", new_callable=io.StringIO) as mock_out:
with self.assertRaises(SystemExit):
self.get_args_with_argv(["--version"])
self.assertTrue(len(mock_out.getvalue()) > 0)
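# --- Editor's sketch (not part of the original test suite) -----------------
# Standalone illustration of the argv-patching idiom get_args_with_argv uses:
# unittest.mock.patch.object temporarily swaps sys.argv so that argparse
# parses a synthetic command line. The parser and its flag below are
# hypothetical stand-ins, not req_update's real options.
import argparse
import sys
from unittest.mock import patch

def parse_cli(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbose', action='store_true')
    with patch.object(sys, 'argv', ['prog'] + argv):
        # With no explicit argument, parse_args() reads sys.argv[1:]
        return parser.parse_args()

assert parse_cli(['--verbose']).verbose
assert not parse_cli([]).verbose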
[metadata row: per-file quality-signal columns omitted]
next record: hexsha: c4c277d962eb0aa7e8531ec17544485a94894806 | size: 19620 | ext: py | lang: Python
path: data_gathering_subsystem/test/data_modules/test_ocean_mass.py | repo: diego-hermida/ClimateChangeApp | head: 576d49ec5b76f709cc86874ffb03f4a38dbbbbfd
licenses: ["MIT"] | stars: 2 (2018-07-01T20:36:46Z to 2019-11-01T22:47:06Z) | issues: 1 (2021-06-10T20:28:53Z) | forks: null
from unittest import TestCase, mock
from unittest.mock import Mock
import data_gathering_subsystem.data_modules.ocean_mass.ocean_mass as ocean_mass
from utilities.util import deserialize_date, serialize_date
class TestOceanMass(TestCase):
@classmethod
def setUpClass(cls):
ocean_mass.instance(log_to_stdout=False, log_to_telegram=False).remove_files()
def tearDown(self):
if hasattr(self, 'data_collector'):
self.data_collector.remove_files()
def test_instance(self):
self.assertIs(ocean_mass.instance(log_to_file=False, log_to_stdout=False, log_to_telegram=False),
ocean_mass.instance(log_to_file=False, log_to_stdout=False, log_to_telegram=False))
i1 = ocean_mass.instance(log_to_file=False, log_to_stdout=False, log_to_telegram=False)
i1._transition_state = i1._FINISHED
self.assertIsNot(i1, ocean_mass.instance(log_to_file=False, log_to_stdout=False, log_to_telegram=False))
@mock.patch('data_gathering_subsystem.data_modules.ocean_mass.ocean_mass.Reader')
@mock.patch('data_gathering_subsystem.data_modules.ocean_mass.ocean_mass.FTP')
def test_correct_data_collection(self, mock_ftp, mock_reader):
# Mocking MongoDBCollection: initialization and operations
mock_collection = Mock()
mock_collection.close.return_value = None
mock_collection.bulk_write.return_value = insert_result = Mock()
insert_result.bulk_api_result = {'nInserted': 4, 'nMatched': 0, 'nUpserted': 0}
# Mocking FTP operations
mock_ftp.return_value.nlst.return_value = ['antarctica_mass_200204_201701.txt',
'greenland_mass_200204_201701.txt']
mock_ftp.return_value.sendcmd.return_value = '123420170801221800'
side_effect = [['HDR Greenland Mass Trend (04/2002 - 06/2017): -285.85 +/-21.01 Gt/yr\n',
'2002.29 0.00 164.18\n', '2002.35 62.12 103.45\n'],
['2002.29 0.00 113.95\n', '2002.35 14.61 66.61\n']]
mock_reader.return_value.get_data = Mock(side_effect=side_effect)
# Actual execution
self.data_collector = ocean_mass.instance(log_to_stdout=False, log_to_telegram=False)
self.data_collector.collection = mock_collection
self.data_collector.run()
self.assertTrue(mock_collection.method_calls)
self.assertTrue(mock_ftp.called)
self.assertTrue(mock_reader.called)
self.assertTrue(self.data_collector.finished_execution())
self.assertTrue(self.data_collector.successful_execution())
self.assertIsNotNone(self.data_collector.state['data_elements'])
self.assertIsNotNone(self.data_collector.state['inserted_elements'])
self.assertEqual(4, self.data_collector.state['data_elements'])
self.assertEqual(4, self.data_collector.state['inserted_elements'])
self.assertEqual(self.data_collector.config['MAX_UPDATE_FREQUENCY'],
self.data_collector.state['antarctica']['update_frequency'])
self.assertEqual(self.data_collector.config['MAX_UPDATE_FREQUENCY'],
self.data_collector.state['greenland']['update_frequency'])
data = mock_collection.mock_calls[0][1][0]
for v in data:
if v._doc['$setOnInsert']['type'] == ocean_mass.MassType.antarctica:
                self.assertAlmostEqual(-285.85, v._doc['$setOnInsert']['measures'][2]['trend'], delta=0.01)
else:
self.assertIsNone(v._doc['$setOnInsert']['measures'][2]['trend'])
@mock.patch('data_gathering_subsystem.data_modules.ocean_mass.ocean_mass.Reader')
@mock.patch('data_gathering_subsystem.data_modules.ocean_mass.ocean_mass.FTP')
    def test_correct_data_collection_with_unnecessary_files(self, mock_ftp, mock_reader):
# Mocking MongoDBCollection: initialization and operations
mock_collection = Mock()
mock_collection.close.return_value = None
mock_collection.bulk_write.return_value = insert_result = Mock()
insert_result.bulk_api_result = {'nInserted': 4, 'nMatched': 0, 'nUpserted': 0}
# Mocking FTP operations
        mock_ftp.return_value.nlst.return_value = ['antarctica_mass_200204_201701.txt', 'unnecessary_file.txt',
'greenland_mass_200204_201701.txt']
mock_ftp.return_value.sendcmd.return_value = '123420170801221800'
side_effect = [['HDR Greenland Mass Trend (04/2002 - 06/2017): -285.85 +/-21.01 Gt/yr\n',
'2002.29 0.00 164.18\n', '2002.35 62.12 103.45\n'],
['2002.29 0.00 113.95\n', '2002.35 14.61 66.61\n']]
mock_reader.return_value.get_data = Mock(side_effect=side_effect)
# Actual execution
self.data_collector = ocean_mass.instance(log_to_stdout=False, log_to_telegram=False)
self.data_collector.collection = mock_collection
self.data_collector.run()
self.assertTrue(mock_collection.method_calls)
self.assertTrue(mock_ftp.called)
self.assertTrue(mock_reader.called)
self.assertTrue(self.data_collector.finished_execution())
self.assertTrue(self.data_collector.successful_execution())
self.assertIsNotNone(self.data_collector.state['data_elements'])
self.assertIsNotNone(self.data_collector.state['inserted_elements'])
self.assertEqual(4, self.data_collector.state['data_elements'])
self.assertEqual(4, self.data_collector.state['inserted_elements'])
self.assertEqual(self.data_collector.config['MAX_UPDATE_FREQUENCY'],
self.data_collector.state['antarctica']['update_frequency'])
self.assertEqual(self.data_collector.config['MAX_UPDATE_FREQUENCY'],
self.data_collector.state['greenland']['update_frequency'])
@mock.patch('data_gathering_subsystem.data_modules.ocean_mass.ocean_mass.Reader')
@mock.patch('data_gathering_subsystem.data_modules.ocean_mass.ocean_mass.FTP')
def test_data_collection_with_not_all_files_updated_since_last_check(self, mock_ftp, mock_reader):
# Mocking MongoDBCollection: initialization and operations
mock_collection = Mock()
mock_collection.close.return_value = None
mock_collection.bulk_write.return_value = insert_result = Mock()
insert_result.bulk_api_result = {'nInserted': 4, 'nMatched': 0, 'nUpserted': 0}
# Mocking FTP operations
mock_ftp.return_value.nlst.return_value = ['antarctica_mass_200204_201701.txt',
'greenland_mass_200204_201701.txt']
mock_ftp.return_value.sendcmd.return_value = '123420170801221800'
side_effect = [['HDR Greenland Mass Trend (04/2002 - 06/2017): -285.85 +/-21.01 Gt/yr\n',
'2002.29 0.00 164.18\n', '2002.35 62.12 103.45\n'],
['2002.29 0.00 113.95\n', '2002.35 14.61 66.61\n']]
mock_reader.return_value.get_data = Mock(side_effect=side_effect)
# Actual execution
self.data_collector = ocean_mass.instance(log_to_stdout=False, log_to_telegram=False)
self.data_collector.collection = mock_collection
self.data_collector.run()
self.assertTrue(mock_collection.method_calls)
self.assertTrue(mock_ftp.called)
self.assertTrue(mock_reader.called)
self.assertTrue(self.data_collector.finished_execution())
self.assertTrue(self.data_collector.successful_execution())
self.assertIsNotNone(self.data_collector.state['data_elements'])
self.assertIsNotNone(self.data_collector.state['inserted_elements'])
self.assertEqual(4, self.data_collector.state['data_elements'])
self.assertEqual(4, self.data_collector.state['inserted_elements'])
self.assertEqual(self.data_collector.config['MAX_UPDATE_FREQUENCY'],
self.data_collector.state['antarctica']['update_frequency'])
self.assertEqual(self.data_collector.config['MAX_UPDATE_FREQUENCY'],
self.data_collector.state['greenland']['update_frequency'])
@mock.patch('data_gathering_subsystem.data_modules.ocean_mass.ocean_mass.FTP')
def test_data_collection_with_no_new_data(self, mock_ftp):
# Mocking FTP operations
mock_ftp.return_value.nlst.return_value = ['antarctica_mass_200204_201701.txt',
'greenland_mass_200204_201701.txt']
mock_ftp.return_value.sendcmd.return_value = '123420170801221800'
# Actual execution
self.data_collector = ocean_mass.instance(log_to_stdout=False, log_to_telegram=False)
last_request = serialize_date(
deserialize_date('20170801221800.1234', self.data_collector.config['FTP_DATE_FORMAT']))
self.data_collector.config['STATE_STRUCT']['antarctica']['last_modified'] = last_request
self.data_collector.config['STATE_STRUCT']['greenland']['last_modified'] = last_request
self.data_collector.run()
self.assertTrue(mock_ftp.called)
self.assertTrue(self.data_collector.finished_execution())
self.assertTrue(self.data_collector.successful_execution())
self.assertIsNotNone(self.data_collector.state['data_elements'])
self.assertIsNotNone(self.data_collector.state['inserted_elements'])
self.assertEqual(0, self.data_collector.state['data_elements'])
self.assertEqual(0, self.data_collector.state['inserted_elements'])
self.assertEqual(self.data_collector.config['MIN_UPDATE_FREQUENCY'],
self.data_collector.state['antarctica']['update_frequency'])
self.assertEqual(self.data_collector.config['MIN_UPDATE_FREQUENCY'],
self.data_collector.state['greenland']['update_frequency'])
@mock.patch('data_gathering_subsystem.data_modules.ocean_mass.ocean_mass.FTP')
def test_data_collection_with_no_new_data_and_unnecessary_files(self, mock_ftp):
# Mocking FTP operations
mock_ftp.return_value.nlst.return_value = ['antarctica_mass_200204_201701.txt', 'unnecessary_file.txt',
'greenland_mass_200204_201701.txt']
mock_ftp.return_value.sendcmd.return_value = '123420170801221800'
# Actual execution
self.data_collector = ocean_mass.instance(log_to_stdout=False, log_to_telegram=False)
last_request = serialize_date(
deserialize_date('20170801221800.1234', self.data_collector.config['FTP_DATE_FORMAT']))
self.data_collector.config['STATE_STRUCT']['antarctica']['last_modified'] = last_request
self.data_collector.config['STATE_STRUCT']['greenland']['last_modified'] = last_request
self.data_collector.run()
self.assertTrue(mock_ftp.called)
self.assertTrue(self.data_collector.finished_execution())
self.assertTrue(self.data_collector.successful_execution())
self.assertIsNotNone(self.data_collector.state['data_elements'])
self.assertIsNotNone(self.data_collector.state['inserted_elements'])
self.assertEqual(0, self.data_collector.state['data_elements'])
self.assertEqual(0, self.data_collector.state['inserted_elements'])
self.assertEqual(self.data_collector.config['MIN_UPDATE_FREQUENCY'],
self.data_collector.state['antarctica']['update_frequency'])
self.assertEqual(self.data_collector.config['MIN_UPDATE_FREQUENCY'],
self.data_collector.state['greenland']['update_frequency'])
@mock.patch('data_gathering_subsystem.data_modules.ocean_mass.ocean_mass.Reader')
@mock.patch('data_gathering_subsystem.data_modules.ocean_mass.ocean_mass.FTP')
def test_data_collection_invalid_data_from_server(self, mock_ftp, mock_reader):
# Mocking FTP operations
mock_ftp.return_value.nlst.return_value = ['antarctica_mass_200204_201701.txt',
'greenland_mass_200204_201701.txt']
mock_ftp.return_value.sendcmd.return_value = '123420170801221800'
mock_reader.return_value.get_data.return_value = ['Invalid data', 'Cannot be parsed']
# Actual execution
self.data_collector = ocean_mass.instance(log_to_stdout=False, log_to_telegram=False)
self.data_collector.run()
self.assertTrue(mock_ftp.called)
self.assertTrue(self.data_collector.finished_execution())
self.assertFalse(self.data_collector.successful_execution())
self.assertIsNone(self.data_collector.state['data_elements'])
self.assertIsNone(self.data_collector.state['inserted_elements'])
self.assertIsNotNone(self.data_collector.state['error'])
@mock.patch('data_gathering_subsystem.data_modules.ocean_mass.ocean_mass.Reader')
@mock.patch('data_gathering_subsystem.data_modules.ocean_mass.ocean_mass.FTP')
def test_data_collection_with_not_all_items_saved(self, mock_ftp, mock_reader):
# Mocking MongoDBCollection: initialization and operations
mock_collection = Mock()
mock_collection.close.return_value = None
mock_collection.bulk_write.return_value = insert_result = Mock()
insert_result.bulk_api_result = {'nInserted': 7, 'nMatched': 0, 'nUpserted': 0}
# Mocking FTP operations
mock_ftp.return_value.nlst.return_value = ['antarctica_mass_200204_201701.txt',
'greenland_mass_200204_201701.txt']
mock_ftp.return_value.sendcmd.return_value = '123420170801221800'
side_effect = [
['2002.29 0.00 164.18\n', '2002.35 62.12 103.45\n', '2002.29 0.00 164.18\n',
'2002.35 62.12 103.45\n'],
['2002.29 0.00 113.95\n', '2002.35 14.61 66.61\n', '2002.29 0.00 113.95\n',
'2002.35 14.61 66.61\n']]
mock_reader.return_value.get_data = Mock(side_effect=side_effect)
# Actual execution
self.data_collector = ocean_mass.instance(log_to_stdout=False, log_to_telegram=False)
self.data_collector.collection = mock_collection
self.data_collector.run()
self.assertTrue(mock_collection.method_calls)
self.assertTrue(mock_ftp.called)
self.assertTrue(mock_reader.called)
self.assertTrue(self.data_collector.finished_execution())
self.assertFalse(self.data_collector.successful_execution())
self.assertIsNotNone(self.data_collector.state['data_elements'])
self.assertIsNotNone(self.data_collector.state['inserted_elements'])
self.assertEqual(8, self.data_collector.state['data_elements'])
self.assertEqual(7, self.data_collector.state['inserted_elements'])
self.assertEqual(self.data_collector.config['MAX_UPDATE_FREQUENCY'],
self.data_collector.state['antarctica']['update_frequency'])
self.assertEqual(self.data_collector.config['MAX_UPDATE_FREQUENCY'],
self.data_collector.state['greenland']['update_frequency'])
@mock.patch('data_gathering_subsystem.data_modules.ocean_mass.ocean_mass.Reader')
@mock.patch('data_gathering_subsystem.data_modules.ocean_mass.ocean_mass.FTP')
    def test_data_collection_with_too_many_items_not_saved(self, mock_ftp, mock_reader):
# Mocking MongoDBCollection: initialization and operations
mock_collection = Mock()
mock_collection.close.return_value = None
mock_collection.bulk_write.return_value = insert_result = Mock()
insert_result.bulk_api_result = {'nInserted': 6, 'nMatched': 0, 'nUpserted': 0}
# Mocking FTP operations
mock_ftp.return_value.nlst.return_value = ['antarctica_mass_200204_201701.txt',
'greenland_mass_200204_201701.txt']
mock_ftp.return_value.sendcmd.return_value = '123420170801221800'
side_effect = [
['2002.29 0.00 164.18\n', '2002.35 62.12 103.45\n', '2002.29 0.00 164.18\n',
'2002.35 62.12 103.45\n'],
['2002.29 0.00 113.95\n', '2002.35 14.61 66.61\n', '2002.29 0.00 113.95\n',
'2002.35 14.61 66.61\n']]
mock_reader.return_value.get_data = Mock(side_effect=side_effect)
# Actual execution
self.data_collector = ocean_mass.instance(log_to_stdout=False, log_to_telegram=False)
self.data_collector.collection = mock_collection
self.data_collector.run()
self.assertTrue(mock_collection.method_calls)
self.assertTrue(mock_ftp.called)
self.assertTrue(mock_reader.called)
self.assertTrue(self.data_collector.finished_execution())
self.assertFalse(self.data_collector.successful_execution())
self.assertIsNotNone(self.data_collector.state['data_elements'])
self.assertIsNotNone(self.data_collector.state['inserted_elements'])
self.assertEqual(8, self.data_collector.state['data_elements'])
self.assertEqual(6, self.data_collector.state['inserted_elements'])
self.assertIsNotNone(self.data_collector.state['error'])
@mock.patch('data_gathering_subsystem.data_modules.ocean_mass.ocean_mass.Reader')
@mock.patch('data_gathering_subsystem.data_modules.ocean_mass.ocean_mass.FTP')
def test_data_collection_with_no_items_saved(self, mock_ftp, mock_reader):
# Mocking MongoDBCollection: initialization and operations
mock_collection = Mock()
mock_collection.close.return_value = None
mock_collection.bulk_write.return_value = insert_result = Mock()
insert_result.bulk_api_result = {'nInserted': 0, 'nMatched': 0, 'nUpserted': 0}
# Mocking FTP operations
mock_ftp.return_value.nlst.return_value = ['antarctica_mass_200204_201701.txt',
'greenland_mass_200204_201701.txt']
mock_ftp.return_value.sendcmd.return_value = '123420170801221800'
side_effect = [
['2002.29 0.00 164.18\n', '2002.35 62.12 103.45\n', '2002.29 0.00 164.18\n',
'2002.35 62.12 103.45\n'],
['2002.29 0.00 113.95\n', '2002.35 14.61 66.61\n', '2002.29 0.00 113.95\n',
'2002.35 14.61 66.61\n']]
mock_reader.return_value.get_data = Mock(side_effect=side_effect)
# Actual execution
self.data_collector = ocean_mass.instance(log_to_stdout=False, log_to_telegram=False)
self.data_collector.collection = mock_collection
self.data_collector.run()
self.assertTrue(mock_collection.method_calls)
self.assertTrue(mock_ftp.called)
self.assertTrue(mock_reader.called)
self.assertTrue(self.data_collector.finished_execution())
self.assertFalse(self.data_collector.successful_execution())
self.assertIsNotNone(self.data_collector.state['data_elements'])
self.assertIsNotNone(self.data_collector.state['inserted_elements'])
self.assertEqual(8, self.data_collector.state['data_elements'])
self.assertEqual(0, self.data_collector.state['inserted_elements'])
self.assertIsNotNone(self.data_collector.state['error'])
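# Editor's note: a minimal sketch (separate from the file above) of the Mock
# side_effect idiom these tests lean on: when side_effect is a list, each call
# to the mock returns the next element, which is how one mocked
# Reader.get_data feeds two files' worth of FTP lines to the collector.
from unittest.mock import Mock

reader = Mock()
reader.get_data = Mock(side_effect=[['line 1\n', 'line 2\n'], ['line 3\n']])
assert reader.get_data() == ['line 1\n', 'line 2\n']  # first file
assert reader.get_data() == ['line 3\n']              # second file
# A third call would raise StopIteration: the side_effect list is exhausted.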
| 63.701299 | 117 | 0.684149 | 2,371 | 19,620 | 5.358077 | 0.070434 | 0.069899 | 0.148536 | 0.084855 | 0.950645 | 0.950645 | 0.940334 | 0.937343 | 0.937343 | 0.933328 | 0 | 0.06695 | 0.207492 | 19,620 | 307 | 118 | 63.908795 | 0.75008 | 0.035729 | 0 | 0.839695 | 0 | 0.01145 | 0.247804 | 0.085424 | 0 | 0 | 0 | 0 | 0.351145 | 1 | 0.045802 | false | 0 | 0.015267 | 0 | 0.064886 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
c4eb9654c2fef6a1ce43073eb8b4fc9df53ba349 | 119 | py | Python | pajbot/web/routes/__init__.py | smackedlol/pajbot | cc6d00e20fd0847f88e487937ac02d0011e05e67 | ["MIT"] | 145 | 2019-06-08T15:38:40.000Z | 2022-03-29T22:51:47.000Z | pajbot/web/routes/__init__.py | smackedlol/pajbot | cc6d00e20fd0847f88e487937ac02d0011e05e67 | ["MIT"] | 671 | 2019-05-26T22:19:08.000Z | 2022-03-31T06:00:49.000Z | pajbot/web/routes/__init__.py | smackedlol/pajbot | cc6d00e20fd0847f88e487937ac02d0011e05e67 | ["MIT"] | 105 | 2019-05-25T18:22:13.000Z | 2022-02-23T00:57:27.000Z |
import pajbot.web.routes.admin
import pajbot.web.routes.api
import pajbot.web.routes.base
import pajbot.web.routes.clr
| 23.8 | 30 | 0.831933 | 20 | 119 | 4.95 | 0.4 | 0.484848 | 0.606061 | 0.848485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.067227 | 119 | 4 | 31 | 29.75 | 0.891892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
f21df2b0b7590e07267b192eddc652d4a43ae242 | 183,463 | py | Python | operators/konveyor-operator/python/pulumi_pulumi_kubernetes_crds_operators_konveyor_operator/velero/v1/_inputs.py | pulumi/pulumi-kubernetes-crds | 372c4c0182f6b899af82d6edaad521aa14f22150 | ["Apache-2.0"] | null | null | null | operators/konveyor-operator/python/pulumi_pulumi_kubernetes_crds_operators_konveyor_operator/velero/v1/_inputs.py | pulumi/pulumi-kubernetes-crds | 372c4c0182f6b899af82d6edaad521aa14f22150 | ["Apache-2.0"] | 2 | 2020-09-18T17:12:23.000Z | 2020-12-30T19:40:56.000Z | operators/konveyor-operator/python/pulumi_pulumi_kubernetes_crds_operators_konveyor_operator/velero/v1/_inputs.py | pulumi/pulumi-kubernetes-crds | 372c4c0182f6b899af82d6edaad521aa14f22150 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'BackupSpecArgs',
'BackupSpecHooksArgs',
'BackupSpecHooksResourcesArgs',
'BackupSpecHooksResourcesLabelSelectorArgs',
'BackupSpecHooksResourcesLabelSelectorMatchExpressionsArgs',
'BackupSpecHooksResourcesPostArgs',
'BackupSpecHooksResourcesPostExecArgs',
'BackupSpecHooksResourcesPreArgs',
'BackupSpecHooksResourcesPreExecArgs',
'BackupSpecLabelSelectorArgs',
'BackupSpecLabelSelectorMatchExpressionsArgs',
'BackupStatusArgs',
'BackupStatusProgressArgs',
'BackupStorageLocationSpecArgs',
'BackupStorageLocationSpecObjectStorageArgs',
'BackupStorageLocationStatusArgs',
'DeleteBackupRequestSpecArgs',
'DeleteBackupRequestStatusArgs',
'DownloadRequestSpecArgs',
'DownloadRequestSpecTargetArgs',
'DownloadRequestStatusArgs',
'PodVolumeBackupSpecArgs',
'PodVolumeBackupSpecPodArgs',
'PodVolumeBackupStatusArgs',
'PodVolumeBackupStatusProgressArgs',
'PodVolumeRestoreSpecArgs',
'PodVolumeRestoreSpecPodArgs',
'PodVolumeRestoreStatusArgs',
'PodVolumeRestoreStatusProgressArgs',
'ResticRepositorySpecArgs',
'ResticRepositoryStatusArgs',
'RestoreSpecArgs',
'RestoreSpecLabelSelectorArgs',
'RestoreSpecLabelSelectorMatchExpressionsArgs',
'RestoreStatusArgs',
'RestoreStatusPodVolumeRestoreErrorsArgs',
'RestoreStatusPodVolumeRestoreVerifyErrorsArgs',
'ScheduleSpecArgs',
'ScheduleSpecTemplateArgs',
'ScheduleSpecTemplateHooksArgs',
'ScheduleSpecTemplateHooksResourcesArgs',
'ScheduleSpecTemplateHooksResourcesLabelSelectorArgs',
'ScheduleSpecTemplateHooksResourcesLabelSelectorMatchExpressionsArgs',
'ScheduleSpecTemplateHooksResourcesPostArgs',
'ScheduleSpecTemplateHooksResourcesPostExecArgs',
'ScheduleSpecTemplateHooksResourcesPreArgs',
'ScheduleSpecTemplateHooksResourcesPreExecArgs',
'ScheduleSpecTemplateLabelSelectorArgs',
'ScheduleSpecTemplateLabelSelectorMatchExpressionsArgs',
'ScheduleStatusArgs',
'ServerStatusRequestStatusArgs',
'ServerStatusRequestStatusPluginsArgs',
'VolumeSnapshotLocationSpecArgs',
'VolumeSnapshotLocationStatusArgs',
]
@pulumi.input_type
class BackupSpecArgs:
def __init__(__self__, *,
excluded_namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
excluded_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
hooks: Optional[pulumi.Input['BackupSpecHooksArgs']] = None,
include_cluster_resources: Optional[pulumi.Input[bool]] = None,
included_namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
included_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
label_selector: Optional[pulumi.Input['BackupSpecLabelSelectorArgs']] = None,
snapshot_volumes: Optional[pulumi.Input[bool]] = None,
storage_location: Optional[pulumi.Input[str]] = None,
ttl: Optional[pulumi.Input[str]] = None,
volume_snapshot_locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
BackupSpec defines the specification for a Velero backup.
:param pulumi.Input[Sequence[pulumi.Input[str]]] excluded_namespaces: ExcludedNamespaces contains a list of namespaces that are not included in the backup.
:param pulumi.Input[Sequence[pulumi.Input[str]]] excluded_resources: ExcludedResources is a slice of resource names that are not included in the backup.
:param pulumi.Input['BackupSpecHooksArgs'] hooks: Hooks represent custom behaviors that should be executed at different phases of the backup.
:param pulumi.Input[bool] include_cluster_resources: IncludeClusterResources specifies whether cluster-scoped resources should be included for consideration in the backup.
:param pulumi.Input[Sequence[pulumi.Input[str]]] included_namespaces: IncludedNamespaces is a slice of namespace names to include objects from. If empty, all namespaces are included.
:param pulumi.Input[Sequence[pulumi.Input[str]]] included_resources: IncludedResources is a slice of resource names to include in the backup. If empty, all resources are included.
:param pulumi.Input['BackupSpecLabelSelectorArgs'] label_selector: LabelSelector is a metav1.LabelSelector to filter with when adding individual objects to the backup. If empty or nil, all objects are included. Optional.
:param pulumi.Input[bool] snapshot_volumes: SnapshotVolumes specifies whether to take cloud snapshots of any PV's referenced in the set of objects included in the Backup.
:param pulumi.Input[str] storage_location: StorageLocation is a string containing the name of a BackupStorageLocation where the backup should be stored.
:param pulumi.Input[str] ttl: TTL is a time.Duration-parseable string describing how long the Backup should be retained for.
:param pulumi.Input[Sequence[pulumi.Input[str]]] volume_snapshot_locations: VolumeSnapshotLocations is a list containing names of VolumeSnapshotLocations associated with this backup.
"""
if excluded_namespaces is not None:
pulumi.set(__self__, "excluded_namespaces", excluded_namespaces)
if excluded_resources is not None:
pulumi.set(__self__, "excluded_resources", excluded_resources)
if hooks is not None:
pulumi.set(__self__, "hooks", hooks)
if include_cluster_resources is not None:
pulumi.set(__self__, "include_cluster_resources", include_cluster_resources)
if included_namespaces is not None:
pulumi.set(__self__, "included_namespaces", included_namespaces)
if included_resources is not None:
pulumi.set(__self__, "included_resources", included_resources)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
if snapshot_volumes is not None:
pulumi.set(__self__, "snapshot_volumes", snapshot_volumes)
if storage_location is not None:
pulumi.set(__self__, "storage_location", storage_location)
if ttl is not None:
pulumi.set(__self__, "ttl", ttl)
if volume_snapshot_locations is not None:
pulumi.set(__self__, "volume_snapshot_locations", volume_snapshot_locations)
@property
@pulumi.getter(name="excludedNamespaces")
def excluded_namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
ExcludedNamespaces contains a list of namespaces that are not included in the backup.
"""
return pulumi.get(self, "excluded_namespaces")
@excluded_namespaces.setter
def excluded_namespaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "excluded_namespaces", value)
@property
@pulumi.getter(name="excludedResources")
def excluded_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
ExcludedResources is a slice of resource names that are not included in the backup.
"""
return pulumi.get(self, "excluded_resources")
@excluded_resources.setter
def excluded_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "excluded_resources", value)
@property
@pulumi.getter
def hooks(self) -> Optional[pulumi.Input['BackupSpecHooksArgs']]:
"""
Hooks represent custom behaviors that should be executed at different phases of the backup.
"""
return pulumi.get(self, "hooks")
@hooks.setter
def hooks(self, value: Optional[pulumi.Input['BackupSpecHooksArgs']]):
pulumi.set(self, "hooks", value)
@property
@pulumi.getter(name="includeClusterResources")
def include_cluster_resources(self) -> Optional[pulumi.Input[bool]]:
"""
IncludeClusterResources specifies whether cluster-scoped resources should be included for consideration in the backup.
"""
return pulumi.get(self, "include_cluster_resources")
@include_cluster_resources.setter
def include_cluster_resources(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "include_cluster_resources", value)
@property
@pulumi.getter(name="includedNamespaces")
def included_namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
IncludedNamespaces is a slice of namespace names to include objects from. If empty, all namespaces are included.
"""
return pulumi.get(self, "included_namespaces")
@included_namespaces.setter
def included_namespaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "included_namespaces", value)
@property
@pulumi.getter(name="includedResources")
def included_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
IncludedResources is a slice of resource names to include in the backup. If empty, all resources are included.
"""
return pulumi.get(self, "included_resources")
@included_resources.setter
def included_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "included_resources", value)
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional[pulumi.Input['BackupSpecLabelSelectorArgs']]:
"""
LabelSelector is a metav1.LabelSelector to filter with when adding individual objects to the backup. If empty or nil, all objects are included. Optional.
"""
return pulumi.get(self, "label_selector")
@label_selector.setter
def label_selector(self, value: Optional[pulumi.Input['BackupSpecLabelSelectorArgs']]):
pulumi.set(self, "label_selector", value)
@property
@pulumi.getter(name="snapshotVolumes")
def snapshot_volumes(self) -> Optional[pulumi.Input[bool]]:
"""
SnapshotVolumes specifies whether to take cloud snapshots of any PV's referenced in the set of objects included in the Backup.
"""
return pulumi.get(self, "snapshot_volumes")
@snapshot_volumes.setter
def snapshot_volumes(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "snapshot_volumes", value)
@property
@pulumi.getter(name="storageLocation")
def storage_location(self) -> Optional[pulumi.Input[str]]:
"""
StorageLocation is a string containing the name of a BackupStorageLocation where the backup should be stored.
"""
return pulumi.get(self, "storage_location")
@storage_location.setter
def storage_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_location", value)
@property
@pulumi.getter
def ttl(self) -> Optional[pulumi.Input[str]]:
"""
TTL is a time.Duration-parseable string describing how long the Backup should be retained for.
"""
return pulumi.get(self, "ttl")
@ttl.setter
def ttl(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ttl", value)
@property
@pulumi.getter(name="volumeSnapshotLocations")
def volume_snapshot_locations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
VolumeSnapshotLocations is a list containing names of VolumeSnapshotLocations associated with this backup.
"""
return pulumi.get(self, "volume_snapshot_locations")
@volume_snapshot_locations.setter
def volume_snapshot_locations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "volume_snapshot_locations", value)
@pulumi.input_type
class BackupSpecHooksArgs:
def __init__(__self__, *,
resources: Optional[pulumi.Input[Sequence[pulumi.Input['BackupSpecHooksResourcesArgs']]]] = None):
"""
Hooks represent custom behaviors that should be executed at different phases of the backup.
:param pulumi.Input[Sequence[pulumi.Input['BackupSpecHooksResourcesArgs']]] resources: Resources are hooks that should be executed when backing up individual instances of a resource.
"""
if resources is not None:
pulumi.set(__self__, "resources", resources)
@property
@pulumi.getter
def resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackupSpecHooksResourcesArgs']]]]:
"""
Resources are hooks that should be executed when backing up individual instances of a resource.
"""
return pulumi.get(self, "resources")
@resources.setter
def resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackupSpecHooksResourcesArgs']]]]):
pulumi.set(self, "resources", value)
@pulumi.input_type
class BackupSpecHooksResourcesArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
excluded_namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
excluded_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
included_namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
included_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
label_selector: Optional[pulumi.Input['BackupSpecHooksResourcesLabelSelectorArgs']] = None,
post: Optional[pulumi.Input[Sequence[pulumi.Input['BackupSpecHooksResourcesPostArgs']]]] = None,
pre: Optional[pulumi.Input[Sequence[pulumi.Input['BackupSpecHooksResourcesPreArgs']]]] = None):
"""
BackupResourceHookSpec defines one or more BackupResourceHooks that should be executed based on the rules defined for namespaces, resources, and label selector.
:param pulumi.Input[str] name: Name is the name of this hook.
:param pulumi.Input[Sequence[pulumi.Input[str]]] excluded_namespaces: ExcludedNamespaces specifies the namespaces to which this hook spec does not apply.
:param pulumi.Input[Sequence[pulumi.Input[str]]] excluded_resources: ExcludedResources specifies the resources to which this hook spec does not apply.
:param pulumi.Input[Sequence[pulumi.Input[str]]] included_namespaces: IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies to all namespaces.
:param pulumi.Input[Sequence[pulumi.Input[str]]] included_resources: IncludedResources specifies the resources to which this hook spec applies. If empty, it applies to all resources.
:param pulumi.Input['BackupSpecHooksResourcesLabelSelectorArgs'] label_selector: LabelSelector, if specified, filters the resources to which this hook spec applies.
:param pulumi.Input[Sequence[pulumi.Input['BackupSpecHooksResourcesPostArgs']]] post: PostHooks is a list of BackupResourceHooks to execute after storing the item in the backup. These are executed after all "additional items" from item actions are processed.
:param pulumi.Input[Sequence[pulumi.Input['BackupSpecHooksResourcesPreArgs']]] pre: PreHooks is a list of BackupResourceHooks to execute prior to storing the item in the backup. These are executed before any "additional items" from item actions are processed.
"""
pulumi.set(__self__, "name", name)
if excluded_namespaces is not None:
pulumi.set(__self__, "excluded_namespaces", excluded_namespaces)
if excluded_resources is not None:
pulumi.set(__self__, "excluded_resources", excluded_resources)
if included_namespaces is not None:
pulumi.set(__self__, "included_namespaces", included_namespaces)
if included_resources is not None:
pulumi.set(__self__, "included_resources", included_resources)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
if post is not None:
pulumi.set(__self__, "post", post)
if pre is not None:
pulumi.set(__self__, "pre", pre)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name is the name of this hook.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="excludedNamespaces")
def excluded_namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
ExcludedNamespaces specifies the namespaces to which this hook spec does not apply.
"""
return pulumi.get(self, "excluded_namespaces")
@excluded_namespaces.setter
def excluded_namespaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "excluded_namespaces", value)
@property
@pulumi.getter(name="excludedResources")
def excluded_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
ExcludedResources specifies the resources to which this hook spec does not apply.
"""
return pulumi.get(self, "excluded_resources")
@excluded_resources.setter
def excluded_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "excluded_resources", value)
@property
@pulumi.getter(name="includedNamespaces")
def included_namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies to all namespaces.
"""
return pulumi.get(self, "included_namespaces")
@included_namespaces.setter
def included_namespaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "included_namespaces", value)
@property
@pulumi.getter(name="includedResources")
def included_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
IncludedResources specifies the resources to which this hook spec applies. If empty, it applies to all resources.
"""
return pulumi.get(self, "included_resources")
@included_resources.setter
def included_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "included_resources", value)
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional[pulumi.Input['BackupSpecHooksResourcesLabelSelectorArgs']]:
"""
LabelSelector, if specified, filters the resources to which this hook spec applies.
"""
return pulumi.get(self, "label_selector")
@label_selector.setter
def label_selector(self, value: Optional[pulumi.Input['BackupSpecHooksResourcesLabelSelectorArgs']]):
pulumi.set(self, "label_selector", value)
@property
@pulumi.getter
def post(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackupSpecHooksResourcesPostArgs']]]]:
"""
PostHooks is a list of BackupResourceHooks to execute after storing the item in the backup. These are executed after all "additional items" from item actions are processed.
"""
return pulumi.get(self, "post")
@post.setter
def post(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackupSpecHooksResourcesPostArgs']]]]):
pulumi.set(self, "post", value)
@property
@pulumi.getter
def pre(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackupSpecHooksResourcesPreArgs']]]]:
"""
PreHooks is a list of BackupResourceHooks to execute prior to storing the item in the backup. These are executed before any "additional items" from item actions are processed.
"""
return pulumi.get(self, "pre")
@pre.setter
def pre(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackupSpecHooksResourcesPreArgs']]]]):
pulumi.set(self, "pre", value)
@pulumi.input_type
class BackupSpecHooksResourcesLabelSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['BackupSpecHooksResourcesLabelSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
LabelSelector, if specified, filters the resources to which this hook spec applies.
:param pulumi.Input[Sequence[pulumi.Input['BackupSpecHooksResourcesLabelSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackupSpecHooksResourcesLabelSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackupSpecHooksResourcesLabelSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)
@pulumi.input_type
class BackupSpecHooksResourcesLabelSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class BackupSpecHooksResourcesPostArgs:
def __init__(__self__, *,
exec_: pulumi.Input['BackupSpecHooksResourcesPostExecArgs']):
"""
BackupResourceHook defines a hook for a resource.
:param pulumi.Input['BackupSpecHooksResourcesPostExecArgs'] exec_: Exec defines an exec hook.
"""
pulumi.set(__self__, "exec_", exec_)
@property
@pulumi.getter(name="exec")
def exec_(self) -> pulumi.Input['BackupSpecHooksResourcesPostExecArgs']:
"""
Exec defines an exec hook.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: pulumi.Input['BackupSpecHooksResourcesPostExecArgs']):
pulumi.set(self, "exec_", value)
@pulumi.input_type
class BackupSpecHooksResourcesPostExecArgs:
def __init__(__self__, *,
command: pulumi.Input[Sequence[pulumi.Input[str]]],
container: Optional[pulumi.Input[str]] = None,
on_error: Optional[pulumi.Input[str]] = None,
timeout: Optional[pulumi.Input[str]] = None):
"""
Exec defines an exec hook.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command and arguments to execute.
:param pulumi.Input[str] container: Container is the container in the pod where the command should be executed. If not specified, the pod's first container is used.
:param pulumi.Input[str] on_error: OnError specifies how Velero should behave if it encounters an error executing this hook.
:param pulumi.Input[str] timeout: Timeout defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure.
"""
pulumi.set(__self__, "command", command)
if container is not None:
pulumi.set(__self__, "container", container)
if on_error is not None:
pulumi.set(__self__, "on_error", on_error)
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
@property
@pulumi.getter
def command(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
Command is the command and arguments to execute.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "command", value)
@property
@pulumi.getter
def container(self) -> Optional[pulumi.Input[str]]:
"""
Container is the container in the pod where the command should be executed. If not specified, the pod's first container is used.
"""
return pulumi.get(self, "container")
@container.setter
def container(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container", value)
@property
@pulumi.getter(name="onError")
def on_error(self) -> Optional[pulumi.Input[str]]:
"""
OnError specifies how Velero should behave if it encounters an error executing this hook.
"""
return pulumi.get(self, "on_error")
@on_error.setter
def on_error(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "on_error", value)
@property
@pulumi.getter
def timeout(self) -> Optional[pulumi.Input[str]]:
"""
Timeout defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure.
"""
return pulumi.get(self, "timeout")
@timeout.setter
def timeout(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "timeout", value)
@pulumi.input_type
class BackupSpecHooksResourcesPreArgs:
def __init__(__self__, *,
exec_: pulumi.Input['BackupSpecHooksResourcesPreExecArgs']):
"""
BackupResourceHook defines a hook for a resource.
:param pulumi.Input['BackupSpecHooksResourcesPreExecArgs'] exec_: Exec defines an exec hook.
"""
pulumi.set(__self__, "exec_", exec_)
@property
@pulumi.getter(name="exec")
def exec_(self) -> pulumi.Input['BackupSpecHooksResourcesPreExecArgs']:
"""
Exec defines an exec hook.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: pulumi.Input['BackupSpecHooksResourcesPreExecArgs']):
pulumi.set(self, "exec_", value)
@pulumi.input_type
class BackupSpecHooksResourcesPreExecArgs:
def __init__(__self__, *,
command: pulumi.Input[Sequence[pulumi.Input[str]]],
container: Optional[pulumi.Input[str]] = None,
on_error: Optional[pulumi.Input[str]] = None,
timeout: Optional[pulumi.Input[str]] = None):
"""
Exec defines an exec hook.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command and arguments to execute.
:param pulumi.Input[str] container: Container is the container in the pod where the command should be executed. If not specified, the pod's first container is used.
:param pulumi.Input[str] on_error: OnError specifies how Velero should behave if it encounters an error executing this hook.
:param pulumi.Input[str] timeout: Timeout defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure.
"""
pulumi.set(__self__, "command", command)
if container is not None:
pulumi.set(__self__, "container", container)
if on_error is not None:
pulumi.set(__self__, "on_error", on_error)
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
@property
@pulumi.getter
def command(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
Command is the command and arguments to execute.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "command", value)
@property
@pulumi.getter
def container(self) -> Optional[pulumi.Input[str]]:
"""
Container is the container in the pod where the command should be executed. If not specified, the pod's first container is used.
"""
return pulumi.get(self, "container")
@container.setter
def container(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container", value)
@property
@pulumi.getter(name="onError")
def on_error(self) -> Optional[pulumi.Input[str]]:
"""
OnError specifies how Velero should behave if it encounters an error executing this hook.
"""
return pulumi.get(self, "on_error")
@on_error.setter
def on_error(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "on_error", value)
@property
@pulumi.getter
def timeout(self) -> Optional[pulumi.Input[str]]:
"""
Timeout defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure.
"""
return pulumi.get(self, "timeout")
@timeout.setter
def timeout(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "timeout", value)
@pulumi.input_type
class BackupSpecLabelSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['BackupSpecLabelSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
LabelSelector is a metav1.LabelSelector to filter with when adding individual objects to the backup. If empty or nil, all objects are included. Optional.
:param pulumi.Input[Sequence[pulumi.Input['BackupSpecLabelSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BackupSpecLabelSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BackupSpecLabelSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)
@pulumi.input_type
class BackupSpecLabelSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class BackupStatusArgs:
def __init__(__self__, *,
completion_timestamp: Optional[pulumi.Input[str]] = None,
errors: Optional[pulumi.Input[int]] = None,
expiration: Optional[pulumi.Input[str]] = None,
format_version: Optional[pulumi.Input[str]] = None,
phase: Optional[pulumi.Input[str]] = None,
progress: Optional[pulumi.Input['BackupStatusProgressArgs']] = None,
start_timestamp: Optional[pulumi.Input[str]] = None,
validation_errors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
version: Optional[pulumi.Input[int]] = None,
volume_snapshots_attempted: Optional[pulumi.Input[int]] = None,
volume_snapshots_completed: Optional[pulumi.Input[int]] = None,
warnings: Optional[pulumi.Input[int]] = None):
"""
BackupStatus captures the current status of a Velero backup.
:param pulumi.Input[str] completion_timestamp: CompletionTimestamp records the time a backup was completed. Completion time is recorded even on failed backups. Completion time is recorded before uploading the backup object. The server's time is used for CompletionTimestamps
:param pulumi.Input[int] errors: Errors is a count of all error messages that were generated during execution of the backup. The actual errors are in the backup's log file in object storage.
:param pulumi.Input[str] expiration: Expiration is when this Backup is eligible for garbage-collection.
:param pulumi.Input[str] format_version: FormatVersion is the backup format version, including major, minor, and patch version.
:param pulumi.Input[str] phase: Phase is the current state of the Backup.
:param pulumi.Input['BackupStatusProgressArgs'] progress: Progress contains information about the backup's execution progress. Note that this information is best-effort only -- if Velero fails to update it during a backup for any reason, it may be inaccurate/stale.
:param pulumi.Input[str] start_timestamp: StartTimestamp records the time a backup was started. Separate from CreationTimestamp, since that value changes on restores. The server's time is used for StartTimestamps
:param pulumi.Input[Sequence[pulumi.Input[str]]] validation_errors: ValidationErrors is a slice of all validation errors (if applicable).
:param pulumi.Input[int] version: Version is the backup format major version. Deprecated: Please see FormatVersion
:param pulumi.Input[int] volume_snapshots_attempted: VolumeSnapshotsAttempted is the total number of attempted volume snapshots for this backup.
:param pulumi.Input[int] volume_snapshots_completed: VolumeSnapshotsCompleted is the total number of successfully completed volume snapshots for this backup.
:param pulumi.Input[int] warnings: Warnings is a count of all warning messages that were generated during execution of the backup. The actual warnings are in the backup's log file in object storage.
"""
if completion_timestamp is not None:
pulumi.set(__self__, "completion_timestamp", completion_timestamp)
if errors is not None:
pulumi.set(__self__, "errors", errors)
if expiration is not None:
pulumi.set(__self__, "expiration", expiration)
if format_version is not None:
pulumi.set(__self__, "format_version", format_version)
if phase is not None:
pulumi.set(__self__, "phase", phase)
if progress is not None:
pulumi.set(__self__, "progress", progress)
if start_timestamp is not None:
pulumi.set(__self__, "start_timestamp", start_timestamp)
if validation_errors is not None:
pulumi.set(__self__, "validation_errors", validation_errors)
if version is not None:
pulumi.set(__self__, "version", version)
if volume_snapshots_attempted is not None:
pulumi.set(__self__, "volume_snapshots_attempted", volume_snapshots_attempted)
if volume_snapshots_completed is not None:
pulumi.set(__self__, "volume_snapshots_completed", volume_snapshots_completed)
if warnings is not None:
pulumi.set(__self__, "warnings", warnings)
@property
@pulumi.getter(name="completionTimestamp")
def completion_timestamp(self) -> Optional[pulumi.Input[str]]:
"""
CompletionTimestamp records the time a backup was completed. Completion time is recorded even on failed backups. Completion time is recorded before uploading the backup object. The server's time is used for CompletionTimestamps
"""
return pulumi.get(self, "completion_timestamp")
@completion_timestamp.setter
def completion_timestamp(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "completion_timestamp", value)
@property
@pulumi.getter
def errors(self) -> Optional[pulumi.Input[int]]:
"""
Errors is a count of all error messages that were generated during execution of the backup. The actual errors are in the backup's log file in object storage.
"""
return pulumi.get(self, "errors")
@errors.setter
def errors(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "errors", value)
@property
@pulumi.getter
def expiration(self) -> Optional[pulumi.Input[str]]:
"""
Expiration is when this Backup is eligible for garbage-collection.
"""
return pulumi.get(self, "expiration")
@expiration.setter
def expiration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "expiration", value)
@property
@pulumi.getter(name="formatVersion")
def format_version(self) -> Optional[pulumi.Input[str]]:
"""
FormatVersion is the backup format version, including major, minor, and patch version.
"""
return pulumi.get(self, "format_version")
@format_version.setter
def format_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "format_version", value)
@property
@pulumi.getter
def phase(self) -> Optional[pulumi.Input[str]]:
"""
Phase is the current state of the Backup.
"""
return pulumi.get(self, "phase")
@phase.setter
def phase(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "phase", value)
@property
@pulumi.getter
def progress(self) -> Optional[pulumi.Input['BackupStatusProgressArgs']]:
"""
Progress contains information about the backup's execution progress. Note that this information is best-effort only -- if Velero fails to update it during a backup for any reason, it may be inaccurate/stale.
"""
return pulumi.get(self, "progress")
@progress.setter
def progress(self, value: Optional[pulumi.Input['BackupStatusProgressArgs']]):
pulumi.set(self, "progress", value)
@property
@pulumi.getter(name="startTimestamp")
def start_timestamp(self) -> Optional[pulumi.Input[str]]:
"""
        StartTimestamp records the time a backup was started. Separate from CreationTimestamp, since that value changes on restores. The server's time is used for StartTimestamps.
"""
return pulumi.get(self, "start_timestamp")
@start_timestamp.setter
def start_timestamp(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "start_timestamp", value)
@property
@pulumi.getter(name="validationErrors")
def validation_errors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
ValidationErrors is a slice of all validation errors (if applicable).
"""
return pulumi.get(self, "validation_errors")
@validation_errors.setter
def validation_errors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "validation_errors", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[int]]:
"""
        Version is the backup format major version. Deprecated: Please see FormatVersion.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "version", value)
@property
@pulumi.getter(name="volumeSnapshotsAttempted")
def volume_snapshots_attempted(self) -> Optional[pulumi.Input[int]]:
"""
VolumeSnapshotsAttempted is the total number of attempted volume snapshots for this backup.
"""
return pulumi.get(self, "volume_snapshots_attempted")
@volume_snapshots_attempted.setter
def volume_snapshots_attempted(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "volume_snapshots_attempted", value)
@property
@pulumi.getter(name="volumeSnapshotsCompleted")
def volume_snapshots_completed(self) -> Optional[pulumi.Input[int]]:
"""
VolumeSnapshotsCompleted is the total number of successfully completed volume snapshots for this backup.
"""
return pulumi.get(self, "volume_snapshots_completed")
@volume_snapshots_completed.setter
def volume_snapshots_completed(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "volume_snapshots_completed", value)
@property
@pulumi.getter
def warnings(self) -> Optional[pulumi.Input[int]]:
"""
Warnings is a count of all warning messages that were generated during execution of the backup. The actual warnings are in the backup's log file in object storage.
"""
return pulumi.get(self, "warnings")
@warnings.setter
def warnings(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "warnings", value)
@pulumi.input_type
class BackupStatusProgressArgs:
def __init__(__self__, *,
items_backed_up: Optional[pulumi.Input[int]] = None,
total_items: Optional[pulumi.Input[int]] = None):
"""
Progress contains information about the backup's execution progress. Note that this information is best-effort only -- if Velero fails to update it during a backup for any reason, it may be inaccurate/stale.
:param pulumi.Input[int] items_backed_up: ItemsBackedUp is the number of items that have actually been written to the backup tarball so far.
:param pulumi.Input[int] total_items: TotalItems is the total number of items to be backed up. This number may change throughout the execution of the backup due to plugins that return additional related items to back up, the velero.io/exclude-from-backup label, and various other filters that happen as items are processed.
"""
if items_backed_up is not None:
pulumi.set(__self__, "items_backed_up", items_backed_up)
if total_items is not None:
pulumi.set(__self__, "total_items", total_items)
@property
@pulumi.getter(name="itemsBackedUp")
def items_backed_up(self) -> Optional[pulumi.Input[int]]:
"""
ItemsBackedUp is the number of items that have actually been written to the backup tarball so far.
"""
return pulumi.get(self, "items_backed_up")
@items_backed_up.setter
def items_backed_up(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "items_backed_up", value)
@property
@pulumi.getter(name="totalItems")
def total_items(self) -> Optional[pulumi.Input[int]]:
"""
TotalItems is the total number of items to be backed up. This number may change throughout the execution of the backup due to plugins that return additional related items to back up, the velero.io/exclude-from-backup label, and various other filters that happen as items are processed.
"""
return pulumi.get(self, "total_items")
@total_items.setter
def total_items(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "total_items", value)
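# Illustrative sketch of consuming the best-effort progress counters above; the
# counts are made-up placeholder values, and the zero-guard matters because
# Velero may not have populated total_items yet.
#
#     progress = BackupStatusProgressArgs(items_backed_up=42, total_items=100)
#     total = progress.total_items or 0
#     pct = 100 * (progress.items_backed_up or 0) // total if total else None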
@pulumi.input_type
class BackupStorageLocationSpecArgs:
def __init__(__self__, *,
object_storage: pulumi.Input['BackupStorageLocationSpecObjectStorageArgs'],
provider: pulumi.Input[str],
access_mode: Optional[pulumi.Input[str]] = None,
backup_sync_period: Optional[pulumi.Input[str]] = None,
config: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
BackupStorageLocationSpec defines the specification for a Velero BackupStorageLocation.
:param pulumi.Input['BackupStorageLocationSpecObjectStorageArgs'] object_storage: ObjectStorageLocation specifies the settings necessary to connect to a provider's object storage.
:param pulumi.Input[str] provider: Provider is the provider of the backup storage.
:param pulumi.Input[str] access_mode: AccessMode defines the permissions for the backup storage location.
:param pulumi.Input[str] backup_sync_period: BackupSyncPeriod defines how frequently to sync backup API objects from object storage. A value of 0 disables sync.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] config: Config is for provider-specific configuration fields.
"""
pulumi.set(__self__, "object_storage", object_storage)
pulumi.set(__self__, "provider", provider)
if access_mode is not None:
pulumi.set(__self__, "access_mode", access_mode)
if backup_sync_period is not None:
pulumi.set(__self__, "backup_sync_period", backup_sync_period)
if config is not None:
pulumi.set(__self__, "config", config)
@property
@pulumi.getter(name="objectStorage")
def object_storage(self) -> pulumi.Input['BackupStorageLocationSpecObjectStorageArgs']:
"""
ObjectStorageLocation specifies the settings necessary to connect to a provider's object storage.
"""
return pulumi.get(self, "object_storage")
@object_storage.setter
def object_storage(self, value: pulumi.Input['BackupStorageLocationSpecObjectStorageArgs']):
pulumi.set(self, "object_storage", value)
@property
@pulumi.getter
def provider(self) -> pulumi.Input[str]:
"""
Provider is the provider of the backup storage.
"""
return pulumi.get(self, "provider")
@provider.setter
def provider(self, value: pulumi.Input[str]):
pulumi.set(self, "provider", value)
@property
@pulumi.getter(name="accessMode")
def access_mode(self) -> Optional[pulumi.Input[str]]:
"""
AccessMode defines the permissions for the backup storage location.
"""
return pulumi.get(self, "access_mode")
@access_mode.setter
def access_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "access_mode", value)
@property
@pulumi.getter(name="backupSyncPeriod")
def backup_sync_period(self) -> Optional[pulumi.Input[str]]:
"""
BackupSyncPeriod defines how frequently to sync backup API objects from object storage. A value of 0 disables sync.
"""
return pulumi.get(self, "backup_sync_period")
@backup_sync_period.setter
def backup_sync_period(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backup_sync_period", value)
@property
@pulumi.getter
def config(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Config is for provider-specific configuration fields.
"""
return pulumi.get(self, "config")
@config.setter
def config(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "config", value)
@pulumi.input_type
class BackupStorageLocationSpecObjectStorageArgs:
def __init__(__self__, *,
bucket: pulumi.Input[str],
ca_cert: Optional[pulumi.Input[str]] = None,
prefix: Optional[pulumi.Input[str]] = None):
"""
ObjectStorageLocation specifies the settings necessary to connect to a provider's object storage.
:param pulumi.Input[str] bucket: Bucket is the bucket to use for object storage.
:param pulumi.Input[str] ca_cert: CACert defines a CA bundle to use when verifying TLS connections to the provider.
:param pulumi.Input[str] prefix: Prefix is the path inside a bucket to use for Velero storage. Optional.
"""
pulumi.set(__self__, "bucket", bucket)
if ca_cert is not None:
pulumi.set(__self__, "ca_cert", ca_cert)
if prefix is not None:
pulumi.set(__self__, "prefix", prefix)
@property
@pulumi.getter
def bucket(self) -> pulumi.Input[str]:
"""
Bucket is the bucket to use for object storage.
"""
return pulumi.get(self, "bucket")
@bucket.setter
def bucket(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket", value)
@property
@pulumi.getter(name="caCert")
def ca_cert(self) -> Optional[pulumi.Input[str]]:
"""
CACert defines a CA bundle to use when verifying TLS connections to the provider.
"""
return pulumi.get(self, "ca_cert")
@ca_cert.setter
def ca_cert(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ca_cert", value)
@property
@pulumi.getter
def prefix(self) -> Optional[pulumi.Input[str]]:
"""
Prefix is the path inside a bucket to use for Velero storage. Optional.
"""
return pulumi.get(self, "prefix")
@prefix.setter
def prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "prefix", value)
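# Illustrative sketch: wiring BackupStorageLocationSpecObjectStorageArgs into a
# BackupStorageLocationSpecArgs for a hypothetical AWS S3 setup. The bucket
# name, prefix, sync period, and region are placeholder values; which config
# keys are honored is provider-specific.
#
#     bsl_spec = BackupStorageLocationSpecArgs(
#         provider="aws",
#         object_storage=BackupStorageLocationSpecObjectStorageArgs(
#             bucket="example-velero-backups",   # placeholder bucket name
#             prefix="prod-cluster",             # optional path inside the bucket
#         ),
#         backup_sync_period="1m",               # how often to sync backup objects
#         config={"region": "us-east-1"},        # provider-specific configuration
#     )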
@pulumi.input_type
class BackupStorageLocationStatusArgs:
def __init__(__self__, *,
access_mode: Optional[pulumi.Input[str]] = None,
last_synced_revision: Optional[pulumi.Input[str]] = None,
last_synced_time: Optional[pulumi.Input[str]] = None,
phase: Optional[pulumi.Input[str]] = None):
"""
BackupStorageLocationStatus describes the current status of a Velero BackupStorageLocation.
:param pulumi.Input[str] access_mode: AccessMode is an unused field.
Deprecated: there is now an AccessMode field on the Spec and this field will be removed entirely as of v2.0.
:param pulumi.Input[str] last_synced_revision: LastSyncedRevision is the value of the `metadata/revision` file in the backup storage location the last time the BSL's contents were synced into the cluster.
Deprecated: this field is no longer updated or used for detecting changes to the location's contents and will be removed entirely in v2.0.
:param pulumi.Input[str] last_synced_time: LastSyncedTime is the last time the contents of the location were synced into the cluster.
:param pulumi.Input[str] phase: Phase is the current state of the BackupStorageLocation.
"""
if access_mode is not None:
pulumi.set(__self__, "access_mode", access_mode)
if last_synced_revision is not None:
pulumi.set(__self__, "last_synced_revision", last_synced_revision)
if last_synced_time is not None:
pulumi.set(__self__, "last_synced_time", last_synced_time)
if phase is not None:
pulumi.set(__self__, "phase", phase)
@property
@pulumi.getter(name="accessMode")
def access_mode(self) -> Optional[pulumi.Input[str]]:
"""
AccessMode is an unused field.
Deprecated: there is now an AccessMode field on the Spec and this field will be removed entirely as of v2.0.
"""
return pulumi.get(self, "access_mode")
@access_mode.setter
def access_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "access_mode", value)
@property
@pulumi.getter(name="lastSyncedRevision")
def last_synced_revision(self) -> Optional[pulumi.Input[str]]:
"""
LastSyncedRevision is the value of the `metadata/revision` file in the backup storage location the last time the BSL's contents were synced into the cluster.
Deprecated: this field is no longer updated or used for detecting changes to the location's contents and will be removed entirely in v2.0.
"""
return pulumi.get(self, "last_synced_revision")
@last_synced_revision.setter
def last_synced_revision(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_synced_revision", value)
@property
@pulumi.getter(name="lastSyncedTime")
def last_synced_time(self) -> Optional[pulumi.Input[str]]:
"""
LastSyncedTime is the last time the contents of the location were synced into the cluster.
"""
return pulumi.get(self, "last_synced_time")
@last_synced_time.setter
def last_synced_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_synced_time", value)
@property
@pulumi.getter
def phase(self) -> Optional[pulumi.Input[str]]:
"""
Phase is the current state of the BackupStorageLocation.
"""
return pulumi.get(self, "phase")
@phase.setter
def phase(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "phase", value)
@pulumi.input_type
class DeleteBackupRequestSpecArgs:
def __init__(__self__, *,
backup_name: pulumi.Input[str]):
"""
        DeleteBackupRequestSpec is the specification for which backups to delete.
        :param pulumi.Input[str] backup_name: BackupName is the name of the backup to delete.
"""
pulumi.set(__self__, "backup_name", backup_name)
@property
@pulumi.getter(name="backupName")
    def backup_name(self) -> pulumi.Input[str]:
        """
        BackupName is the name of the backup to delete.
        """
        return pulumi.get(self, "backup_name")
@backup_name.setter
def backup_name(self, value: pulumi.Input[str]):
pulumi.set(self, "backup_name", value)
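# Illustrative sketch: a DeleteBackupRequestSpecArgs naming the backup to
# delete; "nightly-2021-01-01" is a placeholder backup name.
#
#     delete_spec = DeleteBackupRequestSpecArgs(backup_name="nightly-2021-01-01")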
@pulumi.input_type
class DeleteBackupRequestStatusArgs:
def __init__(__self__, *,
errors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
phase: Optional[pulumi.Input[str]] = None):
"""
DeleteBackupRequestStatus is the current status of a DeleteBackupRequest.
:param pulumi.Input[Sequence[pulumi.Input[str]]] errors: Errors contains any errors that were encountered during the deletion process.
:param pulumi.Input[str] phase: Phase is the current state of the DeleteBackupRequest.
"""
if errors is not None:
pulumi.set(__self__, "errors", errors)
if phase is not None:
pulumi.set(__self__, "phase", phase)
@property
@pulumi.getter
def errors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Errors contains any errors that were encountered during the deletion process.
"""
return pulumi.get(self, "errors")
@errors.setter
def errors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "errors", value)
@property
@pulumi.getter
def phase(self) -> Optional[pulumi.Input[str]]:
"""
Phase is the current state of the DeleteBackupRequest.
"""
return pulumi.get(self, "phase")
@phase.setter
def phase(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "phase", value)
@pulumi.input_type
class DownloadRequestSpecArgs:
def __init__(__self__, *,
target: pulumi.Input['DownloadRequestSpecTargetArgs']):
"""
DownloadRequestSpec is the specification for a download request.
:param pulumi.Input['DownloadRequestSpecTargetArgs'] target: Target is what to download (e.g. logs for a backup).
"""
pulumi.set(__self__, "target", target)
@property
@pulumi.getter
def target(self) -> pulumi.Input['DownloadRequestSpecTargetArgs']:
"""
Target is what to download (e.g. logs for a backup).
"""
return pulumi.get(self, "target")
@target.setter
def target(self, value: pulumi.Input['DownloadRequestSpecTargetArgs']):
pulumi.set(self, "target", value)
@pulumi.input_type
class DownloadRequestSpecTargetArgs:
def __init__(__self__, *,
kind: pulumi.Input[str],
name: pulumi.Input[str]):
"""
Target is what to download (e.g. logs for a backup).
:param pulumi.Input[str] kind: Kind is the type of file to download.
        :param pulumi.Input[str] name: Name is the name of the Kubernetes resource with which the file is associated.
"""
pulumi.set(__self__, "kind", kind)
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[str]:
"""
Kind is the type of file to download.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[str]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
        Name is the name of the Kubernetes resource with which the file is associated.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
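# Illustrative sketch: a DownloadRequestSpecArgs asking for the logs of a
# backup. "BackupLog" is a kind value Velero defines for backup log files, and
# the backup name is a placeholder.
#
#     download_spec = DownloadRequestSpecArgs(
#         target=DownloadRequestSpecTargetArgs(
#             kind="BackupLog",
#             name="nightly-2021-01-01",
#         ),
#     )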
@pulumi.input_type
class DownloadRequestStatusArgs:
def __init__(__self__, *,
download_url: Optional[pulumi.Input[str]] = None,
expiration: Optional[pulumi.Input[str]] = None,
phase: Optional[pulumi.Input[str]] = None):
"""
DownloadRequestStatus is the current status of a DownloadRequest.
:param pulumi.Input[str] download_url: DownloadURL contains the pre-signed URL for the target file.
:param pulumi.Input[str] expiration: Expiration is when this DownloadRequest expires and can be deleted by the system.
:param pulumi.Input[str] phase: Phase is the current state of the DownloadRequest.
"""
if download_url is not None:
pulumi.set(__self__, "download_url", download_url)
if expiration is not None:
pulumi.set(__self__, "expiration", expiration)
if phase is not None:
pulumi.set(__self__, "phase", phase)
@property
@pulumi.getter(name="downloadURL")
def download_url(self) -> Optional[pulumi.Input[str]]:
"""
DownloadURL contains the pre-signed URL for the target file.
"""
return pulumi.get(self, "download_url")
@download_url.setter
def download_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "download_url", value)
@property
@pulumi.getter
def expiration(self) -> Optional[pulumi.Input[str]]:
"""
Expiration is when this DownloadRequest expires and can be deleted by the system.
"""
return pulumi.get(self, "expiration")
@expiration.setter
def expiration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "expiration", value)
@property
@pulumi.getter
def phase(self) -> Optional[pulumi.Input[str]]:
"""
Phase is the current state of the DownloadRequest.
"""
return pulumi.get(self, "phase")
@phase.setter
def phase(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "phase", value)
@pulumi.input_type
class PodVolumeBackupSpecArgs:
def __init__(__self__, *,
backup_storage_location: pulumi.Input[str],
node: pulumi.Input[str],
pod: pulumi.Input['PodVolumeBackupSpecPodArgs'],
repo_identifier: pulumi.Input[str],
volume: pulumi.Input[str],
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
PodVolumeBackupSpec is the specification for a PodVolumeBackup.
:param pulumi.Input[str] backup_storage_location: BackupStorageLocation is the name of the backup storage location where the restic repository is stored.
:param pulumi.Input[str] node: Node is the name of the node that the Pod is running on.
:param pulumi.Input['PodVolumeBackupSpecPodArgs'] pod: Pod is a reference to the pod containing the volume to be backed up.
:param pulumi.Input[str] repo_identifier: RepoIdentifier is the restic repository identifier.
:param pulumi.Input[str] volume: Volume is the name of the volume within the Pod to be backed up.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Tags are a map of key-value pairs that should be applied to the volume backup as tags.
"""
pulumi.set(__self__, "backup_storage_location", backup_storage_location)
pulumi.set(__self__, "node", node)
pulumi.set(__self__, "pod", pod)
pulumi.set(__self__, "repo_identifier", repo_identifier)
pulumi.set(__self__, "volume", volume)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="backupStorageLocation")
def backup_storage_location(self) -> pulumi.Input[str]:
"""
BackupStorageLocation is the name of the backup storage location where the restic repository is stored.
"""
return pulumi.get(self, "backup_storage_location")
@backup_storage_location.setter
def backup_storage_location(self, value: pulumi.Input[str]):
pulumi.set(self, "backup_storage_location", value)
@property
@pulumi.getter
def node(self) -> pulumi.Input[str]:
"""
Node is the name of the node that the Pod is running on.
"""
return pulumi.get(self, "node")
@node.setter
def node(self, value: pulumi.Input[str]):
pulumi.set(self, "node", value)
@property
@pulumi.getter
def pod(self) -> pulumi.Input['PodVolumeBackupSpecPodArgs']:
"""
Pod is a reference to the pod containing the volume to be backed up.
"""
return pulumi.get(self, "pod")
@pod.setter
def pod(self, value: pulumi.Input['PodVolumeBackupSpecPodArgs']):
pulumi.set(self, "pod", value)
@property
@pulumi.getter(name="repoIdentifier")
def repo_identifier(self) -> pulumi.Input[str]:
"""
RepoIdentifier is the restic repository identifier.
"""
return pulumi.get(self, "repo_identifier")
@repo_identifier.setter
def repo_identifier(self, value: pulumi.Input[str]):
pulumi.set(self, "repo_identifier", value)
@property
@pulumi.getter
def volume(self) -> pulumi.Input[str]:
"""
Volume is the name of the volume within the Pod to be backed up.
"""
return pulumi.get(self, "volume")
@volume.setter
def volume(self, value: pulumi.Input[str]):
pulumi.set(self, "volume", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Tags are a map of key-value pairs that should be applied to the volume backup as tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class PodVolumeBackupSpecPodArgs:
def __init__(__self__, *,
api_version: Optional[pulumi.Input[str]] = None,
field_path: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
resource_version: Optional[pulumi.Input[str]] = None,
uid: Optional[pulumi.Input[str]] = None):
"""
Pod is a reference to the pod containing the volume to be backed up.
:param pulumi.Input[str] api_version: API version of the referent.
:param pulumi.Input[str] field_path: If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.
:param pulumi.Input[str] kind: Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
:param pulumi.Input[str] namespace: Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
:param pulumi.Input[str] resource_version: Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
:param pulumi.Input[str] uid: UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
"""
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
if field_path is not None:
pulumi.set(__self__, "field_path", field_path)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if resource_version is not None:
pulumi.set(__self__, "resource_version", resource_version)
if uid is not None:
pulumi.set(__self__, "uid", uid)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
API version of the referent.
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@property
@pulumi.getter(name="fieldPath")
def field_path(self) -> Optional[pulumi.Input[str]]:
"""
If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.
"""
return pulumi.get(self, "field_path")
@field_path.setter
def field_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "field_path", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def namespace(self) -> Optional[pulumi.Input[str]]:
"""
Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
"""
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace", value)
@property
@pulumi.getter(name="resourceVersion")
def resource_version(self) -> Optional[pulumi.Input[str]]:
"""
Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
"""
return pulumi.get(self, "resource_version")
@resource_version.setter
def resource_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_version", value)
@property
@pulumi.getter
def uid(self) -> Optional[pulumi.Input[str]]:
"""
UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
"""
return pulumi.get(self, "uid")
@uid.setter
def uid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "uid", value)
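# Illustrative sketch: a PodVolumeBackupSpecArgs tying a pod reference to the
# volume being backed up. The node name, pod reference, restic repository
# identifier, and tags are all placeholder values.
#
#     pvb_spec = PodVolumeBackupSpecArgs(
#         backup_storage_location="default",
#         node="worker-node-1",
#         pod=PodVolumeBackupSpecPodArgs(
#             kind="Pod",
#             name="my-app-0",
#             namespace="default",
#         ),
#         repo_identifier="s3:s3.amazonaws.com/example-velero-backups/restic/default",
#         volume="data",
#         tags={"team": "platform"},   # optional key-value tags on the volume backup
#     )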
@pulumi.input_type
class PodVolumeBackupStatusArgs:
def __init__(__self__, *,
completion_timestamp: Optional[pulumi.Input[str]] = None,
message: Optional[pulumi.Input[str]] = None,
path: Optional[pulumi.Input[str]] = None,
phase: Optional[pulumi.Input[str]] = None,
progress: Optional[pulumi.Input['PodVolumeBackupStatusProgressArgs']] = None,
snapshot_id: Optional[pulumi.Input[str]] = None,
start_timestamp: Optional[pulumi.Input[str]] = None):
"""
PodVolumeBackupStatus is the current status of a PodVolumeBackup.
        :param pulumi.Input[str] completion_timestamp: CompletionTimestamp records the time a backup was completed. Completion time is recorded even on failed backups. Completion time is recorded before uploading the backup object. The server's time is used for CompletionTimestamps.
:param pulumi.Input[str] message: Message is a message about the pod volume backup's status.
:param pulumi.Input[str] path: Path is the full path within the controller pod being backed up.
:param pulumi.Input[str] phase: Phase is the current state of the PodVolumeBackup.
:param pulumi.Input['PodVolumeBackupStatusProgressArgs'] progress: Progress holds the total number of bytes of the volume and the current number of backed up bytes. This can be used to display progress information about the backup operation.
:param pulumi.Input[str] snapshot_id: SnapshotID is the identifier for the snapshot of the pod volume.
        :param pulumi.Input[str] start_timestamp: StartTimestamp records the time a backup was started. Separate from CreationTimestamp, since that value changes on restores. The server's time is used for StartTimestamps.
"""
if completion_timestamp is not None:
pulumi.set(__self__, "completion_timestamp", completion_timestamp)
if message is not None:
pulumi.set(__self__, "message", message)
if path is not None:
pulumi.set(__self__, "path", path)
if phase is not None:
pulumi.set(__self__, "phase", phase)
if progress is not None:
pulumi.set(__self__, "progress", progress)
if snapshot_id is not None:
pulumi.set(__self__, "snapshot_id", snapshot_id)
if start_timestamp is not None:
pulumi.set(__self__, "start_timestamp", start_timestamp)
@property
@pulumi.getter(name="completionTimestamp")
def completion_timestamp(self) -> Optional[pulumi.Input[str]]:
"""
        CompletionTimestamp records the time a backup was completed. Completion time is recorded even on failed backups. Completion time is recorded before uploading the backup object. The server's time is used for CompletionTimestamps.
"""
return pulumi.get(self, "completion_timestamp")
@completion_timestamp.setter
def completion_timestamp(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "completion_timestamp", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
"""
Message is a message about the pod volume backup's status.
"""
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path is the full path within the controller pod being backed up.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def phase(self) -> Optional[pulumi.Input[str]]:
"""
Phase is the current state of the PodVolumeBackup.
"""
return pulumi.get(self, "phase")
@phase.setter
def phase(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "phase", value)
@property
@pulumi.getter
def progress(self) -> Optional[pulumi.Input['PodVolumeBackupStatusProgressArgs']]:
"""
Progress holds the total number of bytes of the volume and the current number of backed up bytes. This can be used to display progress information about the backup operation.
"""
return pulumi.get(self, "progress")
@progress.setter
def progress(self, value: Optional[pulumi.Input['PodVolumeBackupStatusProgressArgs']]):
pulumi.set(self, "progress", value)
@property
@pulumi.getter(name="snapshotID")
def snapshot_id(self) -> Optional[pulumi.Input[str]]:
"""
SnapshotID is the identifier for the snapshot of the pod volume.
"""
return pulumi.get(self, "snapshot_id")
@snapshot_id.setter
def snapshot_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "snapshot_id", value)
@property
@pulumi.getter(name="startTimestamp")
def start_timestamp(self) -> Optional[pulumi.Input[str]]:
"""
        StartTimestamp records the time a backup was started. Separate from CreationTimestamp, since that value changes on restores. The server's time is used for StartTimestamps.
"""
return pulumi.get(self, "start_timestamp")
@start_timestamp.setter
def start_timestamp(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "start_timestamp", value)
@pulumi.input_type
class PodVolumeBackupStatusProgressArgs:
def __init__(__self__, *,
bytes_done: Optional[pulumi.Input[int]] = None,
total_bytes: Optional[pulumi.Input[int]] = None):
"""
        Progress holds the total number of bytes of the volume and the current number of backed up bytes. This can be used to display progress information about the backup operation.
        :param pulumi.Input[int] bytes_done: BytesDone is the current number of backed up bytes.
        :param pulumi.Input[int] total_bytes: TotalBytes is the total number of bytes of the volume.
"""
if bytes_done is not None:
pulumi.set(__self__, "bytes_done", bytes_done)
if total_bytes is not None:
pulumi.set(__self__, "total_bytes", total_bytes)
@property
@pulumi.getter(name="bytesDone")
    def bytes_done(self) -> Optional[pulumi.Input[int]]:
        """
        BytesDone is the current number of backed up bytes.
        """
        return pulumi.get(self, "bytes_done")
@bytes_done.setter
def bytes_done(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "bytes_done", value)
@property
@pulumi.getter(name="totalBytes")
    def total_bytes(self) -> Optional[pulumi.Input[int]]:
        """
        TotalBytes is the total number of bytes of the volume.
        """
        return pulumi.get(self, "total_bytes")
@total_bytes.setter
def total_bytes(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "total_bytes", value)
@pulumi.input_type
class PodVolumeRestoreSpecArgs:
def __init__(__self__, *,
backup_storage_location: pulumi.Input[str],
pod: pulumi.Input['PodVolumeRestoreSpecPodArgs'],
repo_identifier: pulumi.Input[str],
snapshot_id: pulumi.Input[str],
volume: pulumi.Input[str]):
"""
PodVolumeRestoreSpec is the specification for a PodVolumeRestore.
:param pulumi.Input[str] backup_storage_location: BackupStorageLocation is the name of the backup storage location where the restic repository is stored.
:param pulumi.Input['PodVolumeRestoreSpecPodArgs'] pod: Pod is a reference to the pod containing the volume to be restored.
:param pulumi.Input[str] repo_identifier: RepoIdentifier is the restic repository identifier.
:param pulumi.Input[str] snapshot_id: SnapshotID is the ID of the volume snapshot to be restored.
:param pulumi.Input[str] volume: Volume is the name of the volume within the Pod to be restored.
"""
pulumi.set(__self__, "backup_storage_location", backup_storage_location)
pulumi.set(__self__, "pod", pod)
pulumi.set(__self__, "repo_identifier", repo_identifier)
pulumi.set(__self__, "snapshot_id", snapshot_id)
pulumi.set(__self__, "volume", volume)
@property
@pulumi.getter(name="backupStorageLocation")
def backup_storage_location(self) -> pulumi.Input[str]:
"""
BackupStorageLocation is the name of the backup storage location where the restic repository is stored.
"""
return pulumi.get(self, "backup_storage_location")
@backup_storage_location.setter
def backup_storage_location(self, value: pulumi.Input[str]):
pulumi.set(self, "backup_storage_location", value)
@property
@pulumi.getter
def pod(self) -> pulumi.Input['PodVolumeRestoreSpecPodArgs']:
"""
Pod is a reference to the pod containing the volume to be restored.
"""
return pulumi.get(self, "pod")
@pod.setter
def pod(self, value: pulumi.Input['PodVolumeRestoreSpecPodArgs']):
pulumi.set(self, "pod", value)
@property
@pulumi.getter(name="repoIdentifier")
def repo_identifier(self) -> pulumi.Input[str]:
"""
RepoIdentifier is the restic repository identifier.
"""
return pulumi.get(self, "repo_identifier")
@repo_identifier.setter
def repo_identifier(self, value: pulumi.Input[str]):
pulumi.set(self, "repo_identifier", value)
@property
@pulumi.getter(name="snapshotID")
def snapshot_id(self) -> pulumi.Input[str]:
"""
SnapshotID is the ID of the volume snapshot to be restored.
"""
return pulumi.get(self, "snapshot_id")
@snapshot_id.setter
def snapshot_id(self, value: pulumi.Input[str]):
pulumi.set(self, "snapshot_id", value)
@property
@pulumi.getter
def volume(self) -> pulumi.Input[str]:
"""
Volume is the name of the volume within the Pod to be restored.
"""
return pulumi.get(self, "volume")
@volume.setter
def volume(self, value: pulumi.Input[str]):
pulumi.set(self, "volume", value)
@pulumi.input_type
class PodVolumeRestoreSpecPodArgs:
def __init__(__self__, *,
api_version: Optional[pulumi.Input[str]] = None,
field_path: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
resource_version: Optional[pulumi.Input[str]] = None,
uid: Optional[pulumi.Input[str]] = None):
"""
Pod is a reference to the pod containing the volume to be restored.
:param pulumi.Input[str] api_version: API version of the referent.
:param pulumi.Input[str] field_path: If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.
:param pulumi.Input[str] kind: Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
:param pulumi.Input[str] namespace: Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
:param pulumi.Input[str] resource_version: Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
:param pulumi.Input[str] uid: UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
"""
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
if field_path is not None:
pulumi.set(__self__, "field_path", field_path)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if resource_version is not None:
pulumi.set(__self__, "resource_version", resource_version)
if uid is not None:
pulumi.set(__self__, "uid", uid)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
API version of the referent.
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@property
@pulumi.getter(name="fieldPath")
def field_path(self) -> Optional[pulumi.Input[str]]:
"""
If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.
"""
return pulumi.get(self, "field_path")
@field_path.setter
def field_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "field_path", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def namespace(self) -> Optional[pulumi.Input[str]]:
"""
Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
"""
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace", value)
@property
@pulumi.getter(name="resourceVersion")
def resource_version(self) -> Optional[pulumi.Input[str]]:
"""
Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
"""
return pulumi.get(self, "resource_version")
@resource_version.setter
def resource_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_version", value)
@property
@pulumi.getter
def uid(self) -> Optional[pulumi.Input[str]]:
"""
UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
"""
return pulumi.get(self, "uid")
@uid.setter
def uid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "uid", value)
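# Illustrative sketch: a PodVolumeRestoreSpecArgs restoring a snapshot into a
# pod's volume. The snapshot ID would normally come from a completed
# PodVolumeBackup's status; all values here are placeholders.
#
#     pvr_spec = PodVolumeRestoreSpecArgs(
#         backup_storage_location="default",
#         pod=PodVolumeRestoreSpecPodArgs(name="my-app-0", namespace="default"),
#         repo_identifier="s3:s3.amazonaws.com/example-velero-backups/restic/default",
#         snapshot_id="1a2b3c4d",   # placeholder restic snapshot ID
#         volume="data",
#     )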
@pulumi.input_type
class PodVolumeRestoreStatusArgs:
def __init__(__self__, *,
completion_timestamp: Optional[pulumi.Input[str]] = None,
errors: Optional[pulumi.Input[int]] = None,
message: Optional[pulumi.Input[str]] = None,
phase: Optional[pulumi.Input[str]] = None,
progress: Optional[pulumi.Input['PodVolumeRestoreStatusProgressArgs']] = None,
restic_pod: Optional[pulumi.Input[str]] = None,
start_timestamp: Optional[pulumi.Input[str]] = None,
verify_errors: Optional[pulumi.Input[int]] = None):
"""
PodVolumeRestoreStatus is the current status of a PodVolumeRestore.
        :param pulumi.Input[str] completion_timestamp: CompletionTimestamp records the time a restore was completed. Completion time is recorded even on failed restores. The server's time is used for CompletionTimestamps.
        :param pulumi.Input[int] errors: Errors is a count of all error messages that were generated during execution of the pod volume restore. The actual errors are in the restic log.
:param pulumi.Input[str] message: Message is a message about the pod volume restore's status.
:param pulumi.Input[str] phase: Phase is the current state of the PodVolumeRestore.
:param pulumi.Input['PodVolumeRestoreStatusProgressArgs'] progress: Progress holds the total number of bytes of the snapshot and the current number of restored bytes. This can be used to display progress information about the restore operation.
:param pulumi.Input[str] restic_pod: ResticPod is the name of the restic pod which processed the restore. Any errors referenced in Errors or VerifyErrors will be logged in this pod's log.
        :param pulumi.Input[str] start_timestamp: StartTimestamp records the time a restore was started. The server's time is used for StartTimestamps.
        :param pulumi.Input[int] verify_errors: VerifyErrors is a count of all verification-related error messages that were generated during execution of the pod volume restore. The actual errors are in the restic log.
"""
if completion_timestamp is not None:
pulumi.set(__self__, "completion_timestamp", completion_timestamp)
if errors is not None:
pulumi.set(__self__, "errors", errors)
if message is not None:
pulumi.set(__self__, "message", message)
if phase is not None:
pulumi.set(__self__, "phase", phase)
if progress is not None:
pulumi.set(__self__, "progress", progress)
if restic_pod is not None:
pulumi.set(__self__, "restic_pod", restic_pod)
if start_timestamp is not None:
pulumi.set(__self__, "start_timestamp", start_timestamp)
if verify_errors is not None:
pulumi.set(__self__, "verify_errors", verify_errors)
@property
@pulumi.getter(name="completionTimestamp")
def completion_timestamp(self) -> Optional[pulumi.Input[str]]:
"""
        CompletionTimestamp records the time a restore was completed. Completion time is recorded even on failed restores. The server's time is used for CompletionTimestamps.
"""
return pulumi.get(self, "completion_timestamp")
@completion_timestamp.setter
def completion_timestamp(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "completion_timestamp", value)
@property
@pulumi.getter
def errors(self) -> Optional[pulumi.Input[int]]:
"""
        Errors is a count of all error messages that were generated during execution of the pod volume restore. The actual errors are in the restic log.
"""
return pulumi.get(self, "errors")
@errors.setter
def errors(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "errors", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
"""
Message is a message about the pod volume restore's status.
"""
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@property
@pulumi.getter
def phase(self) -> Optional[pulumi.Input[str]]:
"""
Phase is the current state of the PodVolumeRestore.
"""
return pulumi.get(self, "phase")
@phase.setter
def phase(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "phase", value)
@property
@pulumi.getter
def progress(self) -> Optional[pulumi.Input['PodVolumeRestoreStatusProgressArgs']]:
"""
Progress holds the total number of bytes of the snapshot and the current number of restored bytes. This can be used to display progress information about the restore operation.
"""
return pulumi.get(self, "progress")
@progress.setter
def progress(self, value: Optional[pulumi.Input['PodVolumeRestoreStatusProgressArgs']]):
pulumi.set(self, "progress", value)
@property
@pulumi.getter(name="resticPod")
def restic_pod(self) -> Optional[pulumi.Input[str]]:
"""
ResticPod is the name of the restic pod which processed the restore. Any errors referenced in Errors or VerifyErrors will be logged in this pod's log.
"""
return pulumi.get(self, "restic_pod")
@restic_pod.setter
def restic_pod(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "restic_pod", value)
@property
@pulumi.getter(name="startTimestamp")
def start_timestamp(self) -> Optional[pulumi.Input[str]]:
"""
        StartTimestamp records the time a restore was started. The server's time is used for StartTimestamps.
"""
return pulumi.get(self, "start_timestamp")
@start_timestamp.setter
def start_timestamp(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "start_timestamp", value)
@property
@pulumi.getter(name="verifyErrors")
def verify_errors(self) -> Optional[pulumi.Input[int]]:
"""
        VerifyErrors is a count of all verification-related error messages that were generated during execution of the pod volume restore. The actual errors are in the restic log.
"""
return pulumi.get(self, "verify_errors")
@verify_errors.setter
def verify_errors(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "verify_errors", value)
@pulumi.input_type
class PodVolumeRestoreStatusProgressArgs:
def __init__(__self__, *,
bytes_done: Optional[pulumi.Input[int]] = None,
total_bytes: Optional[pulumi.Input[int]] = None):
"""
        Progress holds the total number of bytes of the snapshot and the current number of restored bytes. This can be used to display progress information about the restore operation.
        :param pulumi.Input[int] bytes_done: BytesDone is the current number of restored bytes.
        :param pulumi.Input[int] total_bytes: TotalBytes is the total number of bytes of the snapshot.
"""
if bytes_done is not None:
pulumi.set(__self__, "bytes_done", bytes_done)
if total_bytes is not None:
pulumi.set(__self__, "total_bytes", total_bytes)
@property
@pulumi.getter(name="bytesDone")
    def bytes_done(self) -> Optional[pulumi.Input[int]]:
        """
        BytesDone is the current number of restored bytes.
        """
        return pulumi.get(self, "bytes_done")
@bytes_done.setter
def bytes_done(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "bytes_done", value)
@property
@pulumi.getter(name="totalBytes")
    def total_bytes(self) -> Optional[pulumi.Input[int]]:
        """
        TotalBytes is the total number of bytes of the snapshot.
        """
        return pulumi.get(self, "total_bytes")
@total_bytes.setter
def total_bytes(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "total_bytes", value)
@pulumi.input_type
class ResticRepositorySpecArgs:
def __init__(__self__, *,
backup_storage_location: pulumi.Input[str],
maintenance_frequency: pulumi.Input[str],
restic_identifier: pulumi.Input[str],
volume_namespace: pulumi.Input[str]):
"""
ResticRepositorySpec is the specification for a ResticRepository.
:param pulumi.Input[str] backup_storage_location: BackupStorageLocation is the name of the BackupStorageLocation that should contain this repository.
:param pulumi.Input[str] maintenance_frequency: MaintenanceFrequency is how often maintenance should be run.
:param pulumi.Input[str] restic_identifier: ResticIdentifier is the full restic-compatible string for identifying this repository.
:param pulumi.Input[str] volume_namespace: VolumeNamespace is the namespace this restic repository contains pod volume backups for.
"""
pulumi.set(__self__, "backup_storage_location", backup_storage_location)
pulumi.set(__self__, "maintenance_frequency", maintenance_frequency)
pulumi.set(__self__, "restic_identifier", restic_identifier)
pulumi.set(__self__, "volume_namespace", volume_namespace)
@property
@pulumi.getter(name="backupStorageLocation")
def backup_storage_location(self) -> pulumi.Input[str]:
"""
BackupStorageLocation is the name of the BackupStorageLocation that should contain this repository.
"""
return pulumi.get(self, "backup_storage_location")
@backup_storage_location.setter
def backup_storage_location(self, value: pulumi.Input[str]):
pulumi.set(self, "backup_storage_location", value)
@property
@pulumi.getter(name="maintenanceFrequency")
def maintenance_frequency(self) -> pulumi.Input[str]:
"""
MaintenanceFrequency is how often maintenance should be run.
"""
return pulumi.get(self, "maintenance_frequency")
@maintenance_frequency.setter
def maintenance_frequency(self, value: pulumi.Input[str]):
pulumi.set(self, "maintenance_frequency", value)
@property
@pulumi.getter(name="resticIdentifier")
def restic_identifier(self) -> pulumi.Input[str]:
"""
ResticIdentifier is the full restic-compatible string for identifying this repository.
"""
return pulumi.get(self, "restic_identifier")
@restic_identifier.setter
def restic_identifier(self, value: pulumi.Input[str]):
pulumi.set(self, "restic_identifier", value)
@property
@pulumi.getter(name="volumeNamespace")
def volume_namespace(self) -> pulumi.Input[str]:
"""
VolumeNamespace is the namespace this restic repository contains pod volume backups for.
"""
return pulumi.get(self, "volume_namespace")
@volume_namespace.setter
def volume_namespace(self, value: pulumi.Input[str]):
pulumi.set(self, "volume_namespace", value)
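# Illustrative sketch: a ResticRepositorySpecArgs for the restic repository
# holding one namespace's pod volume backups. The maintenance frequency is
# assumed to follow Go-style duration strings, and the identifier and names
# below are placeholders.
#
#     repo_spec = ResticRepositorySpecArgs(
#         backup_storage_location="default",
#         maintenance_frequency="168h",   # e.g. weekly maintenance
#         restic_identifier="s3:s3.amazonaws.com/example-velero-backups/restic/team-a",
#         volume_namespace="team-a",
#     )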
@pulumi.input_type
class ResticRepositoryStatusArgs:
def __init__(__self__, *,
last_maintenance_time: Optional[pulumi.Input[str]] = None,
message: Optional[pulumi.Input[str]] = None,
phase: Optional[pulumi.Input[str]] = None):
"""
ResticRepositoryStatus is the current status of a ResticRepository.
:param pulumi.Input[str] last_maintenance_time: LastMaintenanceTime is the last time maintenance was run.
:param pulumi.Input[str] message: Message is a message about the current status of the ResticRepository.
:param pulumi.Input[str] phase: Phase is the current state of the ResticRepository.
"""
if last_maintenance_time is not None:
pulumi.set(__self__, "last_maintenance_time", last_maintenance_time)
if message is not None:
pulumi.set(__self__, "message", message)
if phase is not None:
pulumi.set(__self__, "phase", phase)
@property
@pulumi.getter(name="lastMaintenanceTime")
def last_maintenance_time(self) -> Optional[pulumi.Input[str]]:
"""
LastMaintenanceTime is the last time maintenance was run.
"""
return pulumi.get(self, "last_maintenance_time")
@last_maintenance_time.setter
def last_maintenance_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_maintenance_time", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
"""
Message is a message about the current status of the ResticRepository.
"""
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@property
@pulumi.getter
def phase(self) -> Optional[pulumi.Input[str]]:
"""
Phase is the current state of the ResticRepository.
"""
return pulumi.get(self, "phase")
@phase.setter
def phase(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "phase", value)
@pulumi.input_type
class RestoreSpecArgs:
def __init__(__self__, *,
backup_name: pulumi.Input[str],
excluded_namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
excluded_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
include_cluster_resources: Optional[pulumi.Input[bool]] = None,
included_namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
included_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
label_selector: Optional[pulumi.Input['RestoreSpecLabelSelectorArgs']] = None,
namespace_mapping: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
restore_pvs: Optional[pulumi.Input[bool]] = None,
schedule_name: Optional[pulumi.Input[str]] = None):
"""
RestoreSpec defines the specification for a Velero restore.
:param pulumi.Input[str] backup_name: BackupName is the unique name of the Velero backup to restore from.
:param pulumi.Input[Sequence[pulumi.Input[str]]] excluded_namespaces: ExcludedNamespaces contains a list of namespaces that are not included in the restore.
:param pulumi.Input[Sequence[pulumi.Input[str]]] excluded_resources: ExcludedResources is a slice of resource names that are not included in the restore.
:param pulumi.Input[bool] include_cluster_resources: IncludeClusterResources specifies whether cluster-scoped resources should be included for consideration in the restore. If null, defaults to true.
:param pulumi.Input[Sequence[pulumi.Input[str]]] included_namespaces: IncludedNamespaces is a slice of namespace names to include objects from. If empty, all namespaces are included.
:param pulumi.Input[Sequence[pulumi.Input[str]]] included_resources: IncludedResources is a slice of resource names to include in the restore. If empty, all resources in the backup are included.
:param pulumi.Input['RestoreSpecLabelSelectorArgs'] label_selector: LabelSelector is a metav1.LabelSelector to filter with when restoring individual objects from the backup. If empty or nil, all objects are included. Optional.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] namespace_mapping: NamespaceMapping is a map of source namespace names to target namespace names to restore into. Any source namespaces not included in the map will be restored into namespaces of the same name.
:param pulumi.Input[bool] restore_pvs: RestorePVs specifies whether to restore all included PVs from snapshot (via the cloudprovider).
:param pulumi.Input[str] schedule_name: ScheduleName is the unique name of the Velero schedule to restore from. If specified, and BackupName is empty, Velero will restore from the most recent successful backup created from this schedule.
"""
pulumi.set(__self__, "backup_name", backup_name)
if excluded_namespaces is not None:
pulumi.set(__self__, "excluded_namespaces", excluded_namespaces)
if excluded_resources is not None:
pulumi.set(__self__, "excluded_resources", excluded_resources)
if include_cluster_resources is not None:
pulumi.set(__self__, "include_cluster_resources", include_cluster_resources)
if included_namespaces is not None:
pulumi.set(__self__, "included_namespaces", included_namespaces)
if included_resources is not None:
pulumi.set(__self__, "included_resources", included_resources)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
if namespace_mapping is not None:
pulumi.set(__self__, "namespace_mapping", namespace_mapping)
if restore_pvs is not None:
pulumi.set(__self__, "restore_pvs", restore_pvs)
if schedule_name is not None:
pulumi.set(__self__, "schedule_name", schedule_name)
@property
@pulumi.getter(name="backupName")
def backup_name(self) -> pulumi.Input[str]:
"""
BackupName is the unique name of the Velero backup to restore from.
"""
return pulumi.get(self, "backup_name")
@backup_name.setter
def backup_name(self, value: pulumi.Input[str]):
pulumi.set(self, "backup_name", value)
@property
@pulumi.getter(name="excludedNamespaces")
def excluded_namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
ExcludedNamespaces contains a list of namespaces that are not included in the restore.
"""
return pulumi.get(self, "excluded_namespaces")
@excluded_namespaces.setter
def excluded_namespaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "excluded_namespaces", value)
@property
@pulumi.getter(name="excludedResources")
def excluded_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
ExcludedResources is a slice of resource names that are not included in the restore.
"""
return pulumi.get(self, "excluded_resources")
@excluded_resources.setter
def excluded_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "excluded_resources", value)
@property
@pulumi.getter(name="includeClusterResources")
def include_cluster_resources(self) -> Optional[pulumi.Input[bool]]:
"""
IncludeClusterResources specifies whether cluster-scoped resources should be included for consideration in the restore. If null, defaults to true.
"""
return pulumi.get(self, "include_cluster_resources")
@include_cluster_resources.setter
def include_cluster_resources(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "include_cluster_resources", value)
@property
@pulumi.getter(name="includedNamespaces")
def included_namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
IncludedNamespaces is a slice of namespace names to include objects from. If empty, all namespaces are included.
"""
return pulumi.get(self, "included_namespaces")
@included_namespaces.setter
def included_namespaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "included_namespaces", value)
@property
@pulumi.getter(name="includedResources")
def included_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
IncludedResources is a slice of resource names to include in the restore. If empty, all resources in the backup are included.
"""
return pulumi.get(self, "included_resources")
@included_resources.setter
def included_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "included_resources", value)
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional[pulumi.Input['RestoreSpecLabelSelectorArgs']]:
"""
LabelSelector is a metav1.LabelSelector to filter with when restoring individual objects from the backup. If empty or nil, all objects are included. Optional.
"""
return pulumi.get(self, "label_selector")
@label_selector.setter
def label_selector(self, value: Optional[pulumi.Input['RestoreSpecLabelSelectorArgs']]):
pulumi.set(self, "label_selector", value)
@property
@pulumi.getter(name="namespaceMapping")
def namespace_mapping(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
NamespaceMapping is a map of source namespace names to target namespace names to restore into. Any source namespaces not included in the map will be restored into namespaces of the same name.
"""
return pulumi.get(self, "namespace_mapping")
@namespace_mapping.setter
def namespace_mapping(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "namespace_mapping", value)
@property
@pulumi.getter(name="restorePVs")
def restore_pvs(self) -> Optional[pulumi.Input[bool]]:
"""
        RestorePVs specifies whether to restore all included PVs from snapshot (via the cloud provider).
"""
return pulumi.get(self, "restore_pvs")
@restore_pvs.setter
def restore_pvs(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "restore_pvs", value)
@property
@pulumi.getter(name="scheduleName")
def schedule_name(self) -> Optional[pulumi.Input[str]]:
"""
ScheduleName is the unique name of the Velero schedule to restore from. If specified, and BackupName is empty, Velero will restore from the most recent successful backup created from this schedule.
"""
return pulumi.get(self, "schedule_name")
@schedule_name.setter
def schedule_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "schedule_name", value)
@pulumi.input_type
class RestoreSpecLabelSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['RestoreSpecLabelSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
LabelSelector is a metav1.LabelSelector to filter with when restoring individual objects from the backup. If empty or nil, all objects are included. Optional.
:param pulumi.Input[Sequence[pulumi.Input['RestoreSpecLabelSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RestoreSpecLabelSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RestoreSpecLabelSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)

@pulumi.input_type
class RestoreSpecLabelSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class RestoreStatusArgs:
def __init__(__self__, *,
errors: Optional[pulumi.Input[int]] = None,
failure_reason: Optional[pulumi.Input[str]] = None,
phase: Optional[pulumi.Input[str]] = None,
pod_volume_restore_errors: Optional[pulumi.Input[Sequence[pulumi.Input['RestoreStatusPodVolumeRestoreErrorsArgs']]]] = None,
pod_volume_restore_verify_errors: Optional[pulumi.Input[Sequence[pulumi.Input['RestoreStatusPodVolumeRestoreVerifyErrorsArgs']]]] = None,
validation_errors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
warnings: Optional[pulumi.Input[int]] = None):
"""
        RestoreStatus captures the current status of a Velero restore.
        :param pulumi.Input[int] errors: Errors is a count of all error messages that were generated during execution of the restore. The actual errors are stored in object storage.
        :param pulumi.Input[str] failure_reason: FailureReason is an error that caused the entire restore to fail.
        :param pulumi.Input[str] phase: Phase is the current state of the Restore.
        :param pulumi.Input[Sequence[pulumi.Input['RestoreStatusPodVolumeRestoreErrorsArgs']]] pod_volume_restore_errors: PodVolumeRestoreErrors is a slice of all PodVolumeRestores with errors (errors encountered by restic when restoring a pod), if applicable.
        :param pulumi.Input[Sequence[pulumi.Input['RestoreStatusPodVolumeRestoreVerifyErrorsArgs']]] pod_volume_restore_verify_errors: PodVolumeRestoreVerifyErrors is a slice of all PodVolumeRestore errors from restore verification (errors encountered by restic when verifying a pod restore), if applicable.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] validation_errors: ValidationErrors is a slice of all validation errors, if applicable.
        :param pulumi.Input[int] warnings: Warnings is a count of all warning messages that were generated during execution of the restore. The actual warnings are stored in object storage.
"""
if errors is not None:
pulumi.set(__self__, "errors", errors)
if failure_reason is not None:
pulumi.set(__self__, "failure_reason", failure_reason)
if phase is not None:
pulumi.set(__self__, "phase", phase)
if pod_volume_restore_errors is not None:
pulumi.set(__self__, "pod_volume_restore_errors", pod_volume_restore_errors)
if pod_volume_restore_verify_errors is not None:
pulumi.set(__self__, "pod_volume_restore_verify_errors", pod_volume_restore_verify_errors)
if validation_errors is not None:
pulumi.set(__self__, "validation_errors", validation_errors)
if warnings is not None:
pulumi.set(__self__, "warnings", warnings)
@property
@pulumi.getter
def errors(self) -> Optional[pulumi.Input[int]]:
"""
Errors is a count of all error messages that were generated during execution of the restore. The actual errors are stored in object storage.
"""
return pulumi.get(self, "errors")
@errors.setter
def errors(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "errors", value)
@property
@pulumi.getter(name="failureReason")
def failure_reason(self) -> Optional[pulumi.Input[str]]:
"""
FailureReason is an error that caused the entire restore to fail.
"""
return pulumi.get(self, "failure_reason")
@failure_reason.setter
def failure_reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "failure_reason", value)
@property
@pulumi.getter
def phase(self) -> Optional[pulumi.Input[str]]:
"""
        Phase is the current state of the Restore.
"""
return pulumi.get(self, "phase")
@phase.setter
def phase(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "phase", value)
@property
@pulumi.getter(name="podVolumeRestoreErrors")
def pod_volume_restore_errors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RestoreStatusPodVolumeRestoreErrorsArgs']]]]:
"""
        PodVolumeRestoreErrors is a slice of all PodVolumeRestores with errors (errors encountered by restic when restoring a pod), if applicable.
"""
return pulumi.get(self, "pod_volume_restore_errors")
@pod_volume_restore_errors.setter
def pod_volume_restore_errors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RestoreStatusPodVolumeRestoreErrorsArgs']]]]):
pulumi.set(self, "pod_volume_restore_errors", value)
@property
@pulumi.getter(name="podVolumeRestoreVerifyErrors")
def pod_volume_restore_verify_errors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RestoreStatusPodVolumeRestoreVerifyErrorsArgs']]]]:
"""
        PodVolumeRestoreVerifyErrors is a slice of all PodVolumeRestore errors from restore verification (errors encountered by restic when verifying a pod restore), if applicable.
"""
return pulumi.get(self, "pod_volume_restore_verify_errors")
@pod_volume_restore_verify_errors.setter
def pod_volume_restore_verify_errors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RestoreStatusPodVolumeRestoreVerifyErrorsArgs']]]]):
pulumi.set(self, "pod_volume_restore_verify_errors", value)
@property
@pulumi.getter(name="validationErrors")
def validation_errors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
        ValidationErrors is a slice of all validation errors, if applicable.
"""
return pulumi.get(self, "validation_errors")
@validation_errors.setter
def validation_errors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "validation_errors", value)
@property
@pulumi.getter
def warnings(self) -> Optional[pulumi.Input[int]]:
"""
Warnings is a count of all warning messages that were generated during execution of the restore. The actual warnings are stored in object storage.
"""
return pulumi.get(self, "warnings")
@warnings.setter
def warnings(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "warnings", value)

@pulumi.input_type
class RestoreStatusPodVolumeRestoreErrorsArgs:
def __init__(__self__, *,
api_version: Optional[pulumi.Input[str]] = None,
field_path: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
resource_version: Optional[pulumi.Input[str]] = None,
uid: Optional[pulumi.Input[str]] = None):
"""
ObjectReference contains enough information to let you inspect or modify the referred object.
:param pulumi.Input[str] api_version: API version of the referent.
:param pulumi.Input[str] field_path: If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.
:param pulumi.Input[str] kind: Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
:param pulumi.Input[str] namespace: Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
:param pulumi.Input[str] resource_version: Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
:param pulumi.Input[str] uid: UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
"""
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
if field_path is not None:
pulumi.set(__self__, "field_path", field_path)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if resource_version is not None:
pulumi.set(__self__, "resource_version", resource_version)
if uid is not None:
pulumi.set(__self__, "uid", uid)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
API version of the referent.
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@property
@pulumi.getter(name="fieldPath")
def field_path(self) -> Optional[pulumi.Input[str]]:
"""
If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.
"""
return pulumi.get(self, "field_path")
@field_path.setter
def field_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "field_path", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def namespace(self) -> Optional[pulumi.Input[str]]:
"""
Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
"""
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace", value)
@property
@pulumi.getter(name="resourceVersion")
def resource_version(self) -> Optional[pulumi.Input[str]]:
"""
Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
"""
return pulumi.get(self, "resource_version")
@resource_version.setter
def resource_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_version", value)
@property
@pulumi.getter
def uid(self) -> Optional[pulumi.Input[str]]:
"""
UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
"""
return pulumi.get(self, "uid")
@uid.setter
def uid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "uid", value)
@pulumi.input_type
class RestoreStatusPodVolumeRestoreVerifyErrorsArgs:
def __init__(__self__, *,
api_version: Optional[pulumi.Input[str]] = None,
field_path: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
resource_version: Optional[pulumi.Input[str]] = None,
uid: Optional[pulumi.Input[str]] = None):
"""
ObjectReference contains enough information to let you inspect or modify the referred object.
:param pulumi.Input[str] api_version: API version of the referent.
:param pulumi.Input[str] field_path: If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.
:param pulumi.Input[str] kind: Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
:param pulumi.Input[str] namespace: Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
:param pulumi.Input[str] resource_version: Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
:param pulumi.Input[str] uid: UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
"""
if api_version is not None:
pulumi.set(__self__, "api_version", api_version)
if field_path is not None:
pulumi.set(__self__, "field_path", field_path)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if resource_version is not None:
pulumi.set(__self__, "resource_version", resource_version)
if uid is not None:
pulumi.set(__self__, "uid", uid)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
API version of the referent.
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@property
@pulumi.getter(name="fieldPath")
def field_path(self) -> Optional[pulumi.Input[str]]:
"""
If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.
"""
return pulumi.get(self, "field_path")
@field_path.setter
def field_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "field_path", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def namespace(self) -> Optional[pulumi.Input[str]]:
"""
Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
"""
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "namespace", value)
@property
@pulumi.getter(name="resourceVersion")
def resource_version(self) -> Optional[pulumi.Input[str]]:
"""
Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
"""
return pulumi.get(self, "resource_version")
@resource_version.setter
def resource_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_version", value)
@property
@pulumi.getter
def uid(self) -> Optional[pulumi.Input[str]]:
"""
UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
"""
return pulumi.get(self, "uid")
@uid.setter
def uid(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "uid", value)

@pulumi.input_type
class ScheduleSpecArgs:
def __init__(__self__, *,
schedule: pulumi.Input[str],
template: pulumi.Input['ScheduleSpecTemplateArgs']):
"""
        ScheduleSpec defines the specification for a Velero schedule.
        :param pulumi.Input[str] schedule: Schedule is a Cron expression defining when to run the Backup.
        :param pulumi.Input['ScheduleSpecTemplateArgs'] template: Template is the definition of the Backup to be run on the provided schedule.
"""
pulumi.set(__self__, "schedule", schedule)
pulumi.set(__self__, "template", template)
@property
@pulumi.getter
def schedule(self) -> pulumi.Input[str]:
"""
Schedule is a Cron expression defining when to run the Backup.
"""
return pulumi.get(self, "schedule")
@schedule.setter
def schedule(self, value: pulumi.Input[str]):
pulumi.set(self, "schedule", value)
@property
@pulumi.getter
def template(self) -> pulumi.Input['ScheduleSpecTemplateArgs']:
"""
        Template is the definition of the Backup to be run on the provided schedule.
"""
return pulumi.get(self, "template")
@template.setter
def template(self, value: pulumi.Input['ScheduleSpecTemplateArgs']):
pulumi.set(self, "template", value)
@pulumi.input_type
class ScheduleSpecTemplateArgs:
def __init__(__self__, *,
excluded_namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
excluded_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
hooks: Optional[pulumi.Input['ScheduleSpecTemplateHooksArgs']] = None,
include_cluster_resources: Optional[pulumi.Input[bool]] = None,
included_namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
included_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
label_selector: Optional[pulumi.Input['ScheduleSpecTemplateLabelSelectorArgs']] = None,
snapshot_volumes: Optional[pulumi.Input[bool]] = None,
storage_location: Optional[pulumi.Input[str]] = None,
ttl: Optional[pulumi.Input[str]] = None,
volume_snapshot_locations: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
        Template is the definition of the Backup to be run on the provided schedule.
:param pulumi.Input[Sequence[pulumi.Input[str]]] excluded_namespaces: ExcludedNamespaces contains a list of namespaces that are not included in the backup.
:param pulumi.Input[Sequence[pulumi.Input[str]]] excluded_resources: ExcludedResources is a slice of resource names that are not included in the backup.
:param pulumi.Input['ScheduleSpecTemplateHooksArgs'] hooks: Hooks represent custom behaviors that should be executed at different phases of the backup.
:param pulumi.Input[bool] include_cluster_resources: IncludeClusterResources specifies whether cluster-scoped resources should be included for consideration in the backup.
:param pulumi.Input[Sequence[pulumi.Input[str]]] included_namespaces: IncludedNamespaces is a slice of namespace names to include objects from. If empty, all namespaces are included.
:param pulumi.Input[Sequence[pulumi.Input[str]]] included_resources: IncludedResources is a slice of resource names to include in the backup. If empty, all resources are included.
:param pulumi.Input['ScheduleSpecTemplateLabelSelectorArgs'] label_selector: LabelSelector is a metav1.LabelSelector to filter with when adding individual objects to the backup. If empty or nil, all objects are included. Optional.
        :param pulumi.Input[bool] snapshot_volumes: SnapshotVolumes specifies whether to take cloud snapshots of any PVs referenced in the set of objects included in the Backup.
:param pulumi.Input[str] storage_location: StorageLocation is a string containing the name of a BackupStorageLocation where the backup should be stored.
:param pulumi.Input[str] ttl: TTL is a time.Duration-parseable string describing how long the Backup should be retained for.
:param pulumi.Input[Sequence[pulumi.Input[str]]] volume_snapshot_locations: VolumeSnapshotLocations is a list containing names of VolumeSnapshotLocations associated with this backup.
"""
if excluded_namespaces is not None:
pulumi.set(__self__, "excluded_namespaces", excluded_namespaces)
if excluded_resources is not None:
pulumi.set(__self__, "excluded_resources", excluded_resources)
if hooks is not None:
pulumi.set(__self__, "hooks", hooks)
if include_cluster_resources is not None:
pulumi.set(__self__, "include_cluster_resources", include_cluster_resources)
if included_namespaces is not None:
pulumi.set(__self__, "included_namespaces", included_namespaces)
if included_resources is not None:
pulumi.set(__self__, "included_resources", included_resources)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
if snapshot_volumes is not None:
pulumi.set(__self__, "snapshot_volumes", snapshot_volumes)
if storage_location is not None:
pulumi.set(__self__, "storage_location", storage_location)
if ttl is not None:
pulumi.set(__self__, "ttl", ttl)
if volume_snapshot_locations is not None:
pulumi.set(__self__, "volume_snapshot_locations", volume_snapshot_locations)
@property
@pulumi.getter(name="excludedNamespaces")
def excluded_namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
ExcludedNamespaces contains a list of namespaces that are not included in the backup.
"""
return pulumi.get(self, "excluded_namespaces")
@excluded_namespaces.setter
def excluded_namespaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "excluded_namespaces", value)
@property
@pulumi.getter(name="excludedResources")
def excluded_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
ExcludedResources is a slice of resource names that are not included in the backup.
"""
return pulumi.get(self, "excluded_resources")
@excluded_resources.setter
def excluded_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "excluded_resources", value)
@property
@pulumi.getter
def hooks(self) -> Optional[pulumi.Input['ScheduleSpecTemplateHooksArgs']]:
"""
Hooks represent custom behaviors that should be executed at different phases of the backup.
"""
return pulumi.get(self, "hooks")
@hooks.setter
def hooks(self, value: Optional[pulumi.Input['ScheduleSpecTemplateHooksArgs']]):
pulumi.set(self, "hooks", value)
@property
@pulumi.getter(name="includeClusterResources")
def include_cluster_resources(self) -> Optional[pulumi.Input[bool]]:
"""
IncludeClusterResources specifies whether cluster-scoped resources should be included for consideration in the backup.
"""
return pulumi.get(self, "include_cluster_resources")
@include_cluster_resources.setter
def include_cluster_resources(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "include_cluster_resources", value)
@property
@pulumi.getter(name="includedNamespaces")
def included_namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
IncludedNamespaces is a slice of namespace names to include objects from. If empty, all namespaces are included.
"""
return pulumi.get(self, "included_namespaces")
@included_namespaces.setter
def included_namespaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "included_namespaces", value)
@property
@pulumi.getter(name="includedResources")
def included_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
IncludedResources is a slice of resource names to include in the backup. If empty, all resources are included.
"""
return pulumi.get(self, "included_resources")
@included_resources.setter
def included_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "included_resources", value)
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional[pulumi.Input['ScheduleSpecTemplateLabelSelectorArgs']]:
"""
LabelSelector is a metav1.LabelSelector to filter with when adding individual objects to the backup. If empty or nil, all objects are included. Optional.
"""
return pulumi.get(self, "label_selector")
@label_selector.setter
def label_selector(self, value: Optional[pulumi.Input['ScheduleSpecTemplateLabelSelectorArgs']]):
pulumi.set(self, "label_selector", value)
@property
@pulumi.getter(name="snapshotVolumes")
def snapshot_volumes(self) -> Optional[pulumi.Input[bool]]:
"""
        SnapshotVolumes specifies whether to take cloud snapshots of any PVs referenced in the set of objects included in the Backup.
"""
return pulumi.get(self, "snapshot_volumes")
@snapshot_volumes.setter
def snapshot_volumes(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "snapshot_volumes", value)
@property
@pulumi.getter(name="storageLocation")
def storage_location(self) -> Optional[pulumi.Input[str]]:
"""
StorageLocation is a string containing the name of a BackupStorageLocation where the backup should be stored.
"""
return pulumi.get(self, "storage_location")
@storage_location.setter
def storage_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_location", value)
@property
@pulumi.getter
def ttl(self) -> Optional[pulumi.Input[str]]:
"""
TTL is a time.Duration-parseable string describing how long the Backup should be retained for.
"""
return pulumi.get(self, "ttl")
@ttl.setter
def ttl(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ttl", value)
@property
@pulumi.getter(name="volumeSnapshotLocations")
def volume_snapshot_locations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
VolumeSnapshotLocations is a list containing names of VolumeSnapshotLocations associated with this backup.
"""
return pulumi.get(self, "volume_snapshot_locations")
@volume_snapshot_locations.setter
def volume_snapshot_locations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "volume_snapshot_locations", value)
@pulumi.input_type
class ScheduleSpecTemplateHooksArgs:
def __init__(__self__, *,
resources: Optional[pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateHooksResourcesArgs']]]] = None):
"""
Hooks represent custom behaviors that should be executed at different phases of the backup.
:param pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateHooksResourcesArgs']]] resources: Resources are hooks that should be executed when backing up individual instances of a resource.
"""
if resources is not None:
pulumi.set(__self__, "resources", resources)
@property
@pulumi.getter
def resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateHooksResourcesArgs']]]]:
"""
Resources are hooks that should be executed when backing up individual instances of a resource.
"""
return pulumi.get(self, "resources")
@resources.setter
def resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateHooksResourcesArgs']]]]):
pulumi.set(self, "resources", value)

@pulumi.input_type
class ScheduleSpecTemplateHooksResourcesArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
excluded_namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
excluded_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
included_namespaces: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
included_resources: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
label_selector: Optional[pulumi.Input['ScheduleSpecTemplateHooksResourcesLabelSelectorArgs']] = None,
post: Optional[pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateHooksResourcesPostArgs']]]] = None,
pre: Optional[pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateHooksResourcesPreArgs']]]] = None):
"""
BackupResourceHookSpec defines one or more BackupResourceHooks that should be executed based on the rules defined for namespaces, resources, and label selector.
:param pulumi.Input[str] name: Name is the name of this hook.
:param pulumi.Input[Sequence[pulumi.Input[str]]] excluded_namespaces: ExcludedNamespaces specifies the namespaces to which this hook spec does not apply.
:param pulumi.Input[Sequence[pulumi.Input[str]]] excluded_resources: ExcludedResources specifies the resources to which this hook spec does not apply.
:param pulumi.Input[Sequence[pulumi.Input[str]]] included_namespaces: IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies to all namespaces.
:param pulumi.Input[Sequence[pulumi.Input[str]]] included_resources: IncludedResources specifies the resources to which this hook spec applies. If empty, it applies to all resources.
:param pulumi.Input['ScheduleSpecTemplateHooksResourcesLabelSelectorArgs'] label_selector: LabelSelector, if specified, filters the resources to which this hook spec applies.
:param pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateHooksResourcesPostArgs']]] post: PostHooks is a list of BackupResourceHooks to execute after storing the item in the backup. These are executed after all "additional items" from item actions are processed.
:param pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateHooksResourcesPreArgs']]] pre: PreHooks is a list of BackupResourceHooks to execute prior to storing the item in the backup. These are executed before any "additional items" from item actions are processed.
"""
pulumi.set(__self__, "name", name)
if excluded_namespaces is not None:
pulumi.set(__self__, "excluded_namespaces", excluded_namespaces)
if excluded_resources is not None:
pulumi.set(__self__, "excluded_resources", excluded_resources)
if included_namespaces is not None:
pulumi.set(__self__, "included_namespaces", included_namespaces)
if included_resources is not None:
pulumi.set(__self__, "included_resources", included_resources)
if label_selector is not None:
pulumi.set(__self__, "label_selector", label_selector)
if post is not None:
pulumi.set(__self__, "post", post)
if pre is not None:
pulumi.set(__self__, "pre", pre)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name is the name of this hook.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="excludedNamespaces")
def excluded_namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
ExcludedNamespaces specifies the namespaces to which this hook spec does not apply.
"""
return pulumi.get(self, "excluded_namespaces")
@excluded_namespaces.setter
def excluded_namespaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "excluded_namespaces", value)
@property
@pulumi.getter(name="excludedResources")
def excluded_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
ExcludedResources specifies the resources to which this hook spec does not apply.
"""
return pulumi.get(self, "excluded_resources")
@excluded_resources.setter
def excluded_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "excluded_resources", value)
@property
@pulumi.getter(name="includedNamespaces")
def included_namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies to all namespaces.
"""
return pulumi.get(self, "included_namespaces")
@included_namespaces.setter
def included_namespaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "included_namespaces", value)
@property
@pulumi.getter(name="includedResources")
def included_resources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
IncludedResources specifies the resources to which this hook spec applies. If empty, it applies to all resources.
"""
return pulumi.get(self, "included_resources")
@included_resources.setter
def included_resources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "included_resources", value)
@property
@pulumi.getter(name="labelSelector")
def label_selector(self) -> Optional[pulumi.Input['ScheduleSpecTemplateHooksResourcesLabelSelectorArgs']]:
"""
LabelSelector, if specified, filters the resources to which this hook spec applies.
"""
return pulumi.get(self, "label_selector")
@label_selector.setter
def label_selector(self, value: Optional[pulumi.Input['ScheduleSpecTemplateHooksResourcesLabelSelectorArgs']]):
pulumi.set(self, "label_selector", value)
@property
@pulumi.getter
def post(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateHooksResourcesPostArgs']]]]:
"""
PostHooks is a list of BackupResourceHooks to execute after storing the item in the backup. These are executed after all "additional items" from item actions are processed.
"""
return pulumi.get(self, "post")
@post.setter
def post(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateHooksResourcesPostArgs']]]]):
pulumi.set(self, "post", value)
@property
@pulumi.getter
def pre(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateHooksResourcesPreArgs']]]]:
"""
PreHooks is a list of BackupResourceHooks to execute prior to storing the item in the backup. These are executed before any "additional items" from item actions are processed.
"""
return pulumi.get(self, "pre")
@pre.setter
def pre(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateHooksResourcesPreArgs']]]]):
pulumi.set(self, "pre", value)
@pulumi.input_type
class ScheduleSpecTemplateHooksResourcesLabelSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateHooksResourcesLabelSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
LabelSelector, if specified, filters the resources to which this hook spec applies.
:param pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateHooksResourcesLabelSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateHooksResourcesLabelSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateHooksResourcesLabelSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)

@pulumi.input_type
class ScheduleSpecTemplateHooksResourcesLabelSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)

@pulumi.input_type
class ScheduleSpecTemplateHooksResourcesPostArgs:
def __init__(__self__, *,
exec_: pulumi.Input['ScheduleSpecTemplateHooksResourcesPostExecArgs']):
"""
BackupResourceHook defines a hook for a resource.
:param pulumi.Input['ScheduleSpecTemplateHooksResourcesPostExecArgs'] exec_: Exec defines an exec hook.
"""
pulumi.set(__self__, "exec_", exec_)
@property
@pulumi.getter(name="exec")
def exec_(self) -> pulumi.Input['ScheduleSpecTemplateHooksResourcesPostExecArgs']:
"""
Exec defines an exec hook.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: pulumi.Input['ScheduleSpecTemplateHooksResourcesPostExecArgs']):
pulumi.set(self, "exec_", value)

@pulumi.input_type
class ScheduleSpecTemplateHooksResourcesPostExecArgs:
def __init__(__self__, *,
command: pulumi.Input[Sequence[pulumi.Input[str]]],
container: Optional[pulumi.Input[str]] = None,
on_error: Optional[pulumi.Input[str]] = None,
timeout: Optional[pulumi.Input[str]] = None):
"""
Exec defines an exec hook.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command and arguments to execute.
:param pulumi.Input[str] container: Container is the container in the pod where the command should be executed. If not specified, the pod's first container is used.
:param pulumi.Input[str] on_error: OnError specifies how Velero should behave if it encounters an error executing this hook.
:param pulumi.Input[str] timeout: Timeout defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure.
"""
pulumi.set(__self__, "command", command)
if container is not None:
pulumi.set(__self__, "container", container)
if on_error is not None:
pulumi.set(__self__, "on_error", on_error)
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
@property
@pulumi.getter
def command(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
Command is the command and arguments to execute.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "command", value)
@property
@pulumi.getter
def container(self) -> Optional[pulumi.Input[str]]:
"""
Container is the container in the pod where the command should be executed. If not specified, the pod's first container is used.
"""
return pulumi.get(self, "container")
@container.setter
def container(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container", value)
@property
@pulumi.getter(name="onError")
def on_error(self) -> Optional[pulumi.Input[str]]:
"""
OnError specifies how Velero should behave if it encounters an error executing this hook.
"""
return pulumi.get(self, "on_error")
@on_error.setter
def on_error(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "on_error", value)
@property
@pulumi.getter
def timeout(self) -> Optional[pulumi.Input[str]]:
"""
Timeout defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure.
"""
return pulumi.get(self, "timeout")
@timeout.setter
def timeout(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "timeout", value)
@pulumi.input_type
class ScheduleSpecTemplateHooksResourcesPreArgs:
def __init__(__self__, *,
exec_: pulumi.Input['ScheduleSpecTemplateHooksResourcesPreExecArgs']):
"""
BackupResourceHook defines a hook for a resource.
:param pulumi.Input['ScheduleSpecTemplateHooksResourcesPreExecArgs'] exec_: Exec defines an exec hook.
"""
pulumi.set(__self__, "exec_", exec_)
@property
@pulumi.getter(name="exec")
def exec_(self) -> pulumi.Input['ScheduleSpecTemplateHooksResourcesPreExecArgs']:
"""
Exec defines an exec hook.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: pulumi.Input['ScheduleSpecTemplateHooksResourcesPreExecArgs']):
pulumi.set(self, "exec_", value)

@pulumi.input_type
class ScheduleSpecTemplateHooksResourcesPreExecArgs:
def __init__(__self__, *,
command: pulumi.Input[Sequence[pulumi.Input[str]]],
container: Optional[pulumi.Input[str]] = None,
on_error: Optional[pulumi.Input[str]] = None,
timeout: Optional[pulumi.Input[str]] = None):
"""
Exec defines an exec hook.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command and arguments to execute.
:param pulumi.Input[str] container: Container is the container in the pod where the command should be executed. If not specified, the pod's first container is used.
:param pulumi.Input[str] on_error: OnError specifies how Velero should behave if it encounters an error executing this hook.
:param pulumi.Input[str] timeout: Timeout defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure.
"""
pulumi.set(__self__, "command", command)
if container is not None:
pulumi.set(__self__, "container", container)
if on_error is not None:
pulumi.set(__self__, "on_error", on_error)
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
@property
@pulumi.getter
def command(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
Command is the command and arguments to execute.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "command", value)
@property
@pulumi.getter
def container(self) -> Optional[pulumi.Input[str]]:
"""
Container is the container in the pod where the command should be executed. If not specified, the pod's first container is used.
"""
return pulumi.get(self, "container")
@container.setter
def container(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container", value)
@property
@pulumi.getter(name="onError")
def on_error(self) -> Optional[pulumi.Input[str]]:
"""
OnError specifies how Velero should behave if it encounters an error executing this hook.
"""
return pulumi.get(self, "on_error")
@on_error.setter
def on_error(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "on_error", value)
@property
@pulumi.getter
def timeout(self) -> Optional[pulumi.Input[str]]:
"""
Timeout defines the maximum amount of time Velero should wait for the hook to complete before considering the execution a failure.
"""
return pulumi.get(self, "timeout")
@timeout.setter
def timeout(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "timeout", value)
@pulumi.input_type
class ScheduleSpecTemplateLabelSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateLabelSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
LabelSelector is a metav1.LabelSelector to filter with when adding individual objects to the backup. If empty or nil, all objects are included. Optional.
:param pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateLabelSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateLabelSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ScheduleSpecTemplateLabelSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)

@pulumi.input_type
class ScheduleSpecTemplateLabelSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class ScheduleStatusArgs:
def __init__(__self__, *,
last_backup: Optional[pulumi.Input[str]] = None,
phase: Optional[pulumi.Input[str]] = None,
validation_errors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
ScheduleStatus captures the current state of a Velero schedule
:param pulumi.Input[str] last_backup: LastBackup is the last time a Backup was run for this Schedule
:param pulumi.Input[str] phase: Phase is the current phase of the Schedule
:param pulumi.Input[Sequence[pulumi.Input[str]]] validation_errors: ValidationErrors is a slice of all validation errors (if applicable)
"""
if last_backup is not None:
pulumi.set(__self__, "last_backup", last_backup)
if phase is not None:
pulumi.set(__self__, "phase", phase)
if validation_errors is not None:
pulumi.set(__self__, "validation_errors", validation_errors)
@property
@pulumi.getter(name="lastBackup")
def last_backup(self) -> Optional[pulumi.Input[str]]:
"""
LastBackup is the last time a Backup was run for this Schedule
"""
return pulumi.get(self, "last_backup")
@last_backup.setter
def last_backup(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_backup", value)
@property
@pulumi.getter
def phase(self) -> Optional[pulumi.Input[str]]:
"""
Phase is the current phase of the Schedule
"""
return pulumi.get(self, "phase")
@phase.setter
def phase(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "phase", value)
@property
@pulumi.getter(name="validationErrors")
def validation_errors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
ValidationErrors is a slice of all validation errors (if applicable)
"""
return pulumi.get(self, "validation_errors")
@validation_errors.setter
def validation_errors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "validation_errors", value)
@pulumi.input_type
class ServerStatusRequestStatusArgs:
def __init__(__self__, *,
phase: Optional[pulumi.Input[str]] = None,
plugins: Optional[pulumi.Input[Sequence[pulumi.Input['ServerStatusRequestStatusPluginsArgs']]]] = None,
processed_timestamp: Optional[pulumi.Input[str]] = None,
server_version: Optional[pulumi.Input[str]] = None):
"""
ServerStatusRequestStatus is the current status of a ServerStatusRequest.
:param pulumi.Input[str] phase: Phase is the current lifecycle phase of the ServerStatusRequest.
:param pulumi.Input[Sequence[pulumi.Input['ServerStatusRequestStatusPluginsArgs']]] plugins: Plugins is a list of information about the plugins running on the Velero server
:param pulumi.Input[str] processed_timestamp: ProcessedTimestamp is when the ServerStatusRequest was processed by the ServerStatusRequestController.
:param pulumi.Input[str] server_version: ServerVersion is the Velero server version.
"""
if phase is not None:
pulumi.set(__self__, "phase", phase)
if plugins is not None:
pulumi.set(__self__, "plugins", plugins)
if processed_timestamp is not None:
pulumi.set(__self__, "processed_timestamp", processed_timestamp)
if server_version is not None:
pulumi.set(__self__, "server_version", server_version)
@property
@pulumi.getter
def phase(self) -> Optional[pulumi.Input[str]]:
"""
Phase is the current lifecycle phase of the ServerStatusRequest.
"""
return pulumi.get(self, "phase")
@phase.setter
def phase(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "phase", value)
@property
@pulumi.getter
def plugins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServerStatusRequestStatusPluginsArgs']]]]:
"""
Plugins is a list of information about the plugins running on the Velero server
"""
return pulumi.get(self, "plugins")
@plugins.setter
def plugins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServerStatusRequestStatusPluginsArgs']]]]):
pulumi.set(self, "plugins", value)
@property
@pulumi.getter(name="processedTimestamp")
def processed_timestamp(self) -> Optional[pulumi.Input[str]]:
"""
ProcessedTimestamp is when the ServerStatusRequest was processed by the ServerStatusRequestController.
"""
return pulumi.get(self, "processed_timestamp")
@processed_timestamp.setter
def processed_timestamp(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "processed_timestamp", value)
@property
@pulumi.getter(name="serverVersion")
def server_version(self) -> Optional[pulumi.Input[str]]:
"""
ServerVersion is the Velero server version.
"""
return pulumi.get(self, "server_version")
@server_version.setter
def server_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server_version", value)
@pulumi.input_type
class ServerStatusRequestStatusPluginsArgs:
def __init__(__self__, *,
kind: pulumi.Input[str],
name: pulumi.Input[str]):
"""
PluginInfo contains attributes of a Velero plugin
"""
pulumi.set(__self__, "kind", kind)
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[str]:
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[str]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@pulumi.input_type
class VolumeSnapshotLocationSpecArgs:
def __init__(__self__, *,
provider: pulumi.Input[str],
config: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
VolumeSnapshotLocationSpec defines the specification for a Velero VolumeSnapshotLocation.
:param pulumi.Input[str] provider: Provider is the provider of the volume storage.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] config: Config is for provider-specific configuration fields.
"""
pulumi.set(__self__, "provider", provider)
if config is not None:
pulumi.set(__self__, "config", config)
@property
@pulumi.getter
def provider(self) -> pulumi.Input[str]:
"""
Provider is the provider of the volume storage.
"""
return pulumi.get(self, "provider")
@provider.setter
def provider(self, value: pulumi.Input[str]):
pulumi.set(self, "provider", value)
@property
@pulumi.getter
def config(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Config is for provider-specific configuration fields.
"""
return pulumi.get(self, "config")
@config.setter
def config(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "config", value)
@pulumi.input_type
class VolumeSnapshotLocationStatusArgs:
def __init__(__self__, *,
phase: Optional[pulumi.Input[str]] = None):
"""
VolumeSnapshotLocationStatus describes the current status of a Velero VolumeSnapshotLocation.
:param pulumi.Input[str] phase: VolumeSnapshotLocationPhase is the lifecycle phase of a Velero VolumeSnapshotLocation.
"""
if phase is not None:
pulumi.set(__self__, "phase", phase)
@property
@pulumi.getter
def phase(self) -> Optional[pulumi.Input[str]]:
"""
VolumeSnapshotLocationPhase is the lifecycle phase of a Velero VolumeSnapshotLocation.
"""
return pulumi.get(self, "phase")
@phase.setter
def phase(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "phase", value)
| 47.223423 | 695 | 0.682704 | 21918 | 183463 | 5.592618 | 0.028515 | 0.102571 | 0.073667 | 0.045228 | 0.917645 | 0.891066 | 0.869431 | 0.837705 | 0.825043 | 0.806125 | 0 | 0.000424 | 0.216354 | 183463 | 3884 | 696 | 47.235582 | 0.852181 | 0.352632 | 0 | 0.778682 | 1 | 0 | 0.12743 | 0.05904 | 0 | 0 | 0 | 0.00206 | 0 | 1 | 0.208048 | false | 0 | 0.00214 | 0.002997 | 0.325771 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
| 1eea7b4d3f4703445060023fb5298daa35b8c52f | 26581 | py | Python |
| isi_sdk_8_1_0/isi_sdk_8_1_0/api/worm_api.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | ["Unlicense"] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z |
| isi_sdk_8_1_0/isi_sdk_8_1_0/api/worm_api.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | ["Unlicense"] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z |
| isi_sdk_8_1_0/isi_sdk_8_1_0/api/worm_api.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | ["Unlicense"] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z |
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 5
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from isi_sdk_8_1_0.api_client import ApiClient
class WormApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_worm_domain(self, worm_domain, **kwargs): # noqa: E501
"""create_worm_domain # noqa: E501
Create a WORM domain. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_worm_domain(worm_domain, async_req=True)
>>> result = thread.get()
:param async_req bool
:param WormDomainCreateParams worm_domain: (required)
:return: WormDomainExtended
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_worm_domain_with_http_info(worm_domain, **kwargs) # noqa: E501
else:
(data) = self.create_worm_domain_with_http_info(worm_domain, **kwargs) # noqa: E501
return data
def create_worm_domain_with_http_info(self, worm_domain, **kwargs): # noqa: E501
"""create_worm_domain # noqa: E501
Create a WORM domain. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_worm_domain_with_http_info(worm_domain, async_req=True)
>>> result = thread.get()
:param async_req bool
:param WormDomainCreateParams worm_domain: (required)
:return: WormDomainExtended
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['worm_domain'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_worm_domain" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'worm_domain' is set
if ('worm_domain' not in params or
params['worm_domain'] is None):
raise ValueError("Missing the required parameter `worm_domain` when calling `create_worm_domain`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'worm_domain' in params:
body_params = params['worm_domain']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/worm/domains', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WormDomainExtended', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_worm_domain(self, worm_domain_id, **kwargs): # noqa: E501
"""get_worm_domain # noqa: E501
View a single WORM domain. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_worm_domain(worm_domain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str worm_domain_id: View a single WORM domain. (required)
:return: WormDomains
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_worm_domain_with_http_info(worm_domain_id, **kwargs) # noqa: E501
else:
(data) = self.get_worm_domain_with_http_info(worm_domain_id, **kwargs) # noqa: E501
return data
def get_worm_domain_with_http_info(self, worm_domain_id, **kwargs): # noqa: E501
"""get_worm_domain # noqa: E501
View a single WORM domain. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_worm_domain_with_http_info(worm_domain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str worm_domain_id: View a single WORM domain. (required)
:return: WormDomains
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['worm_domain_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_worm_domain" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'worm_domain_id' is set
if ('worm_domain_id' not in params or
params['worm_domain_id'] is None):
raise ValueError("Missing the required parameter `worm_domain_id` when calling `get_worm_domain`") # noqa: E501
collection_formats = {}
path_params = {}
if 'worm_domain_id' in params:
path_params['WormDomainId'] = params['worm_domain_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/worm/domains/{WormDomainId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WormDomains', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_worm_settings(self, **kwargs): # noqa: E501
"""get_worm_settings # noqa: E501
Get the global WORM settings. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_worm_settings(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: WormSettings
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_worm_settings_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_worm_settings_with_http_info(**kwargs) # noqa: E501
return data
def get_worm_settings_with_http_info(self, **kwargs): # noqa: E501
"""get_worm_settings # noqa: E501
Get the global WORM settings. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_worm_settings_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: WormSettings
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_worm_settings" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/worm/settings', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WormSettings', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_worm_domains(self, **kwargs): # noqa: E501
"""list_worm_domains # noqa: E501
List all WORM domains. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_worm_domains(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str sort: The field that will be used for sorting.
:param int limit: Return no more than this many results at once (see resume).
:param str dir: The direction of the sort.
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: WormDomainsExtended
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_worm_domains_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_worm_domains_with_http_info(**kwargs) # noqa: E501
return data
def list_worm_domains_with_http_info(self, **kwargs): # noqa: E501
"""list_worm_domains # noqa: E501
List all WORM domains. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_worm_domains_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str sort: The field that will be used for sorting.
:param int limit: Return no more than this many results at once (see resume).
:param str dir: The direction of the sort.
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: WormDomainsExtended
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['sort', 'limit', 'dir', 'resume'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_worm_domains" % key
)
params[key] = val
del params['kwargs']
if ('sort' in params and
len(params['sort']) > 255):
raise ValueError("Invalid value for parameter `sort` when calling `list_worm_domains`, length must be less than or equal to `255`") # noqa: E501
if ('sort' in params and
len(params['sort']) < 0):
raise ValueError("Invalid value for parameter `sort` when calling `list_worm_domains`, length must be greater than or equal to `0`") # noqa: E501
if 'limit' in params and params['limit'] > 4294967295: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `list_worm_domains`, must be a value less than or equal to `4294967295`") # noqa: E501
if 'limit' in params and params['limit'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `list_worm_domains`, must be a value greater than or equal to `1`") # noqa: E501
if ('dir' in params and
len(params['dir']) < 0):
raise ValueError("Invalid value for parameter `dir` when calling `list_worm_domains`, length must be greater than or equal to `0`") # noqa: E501
if ('resume' in params and
len(params['resume']) > 8192):
raise ValueError("Invalid value for parameter `resume` when calling `list_worm_domains`, length must be less than or equal to `8192`") # noqa: E501
if ('resume' in params and
len(params['resume']) < 0):
raise ValueError("Invalid value for parameter `resume` when calling `list_worm_domains`, length must be greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'dir' in params:
query_params.append(('dir', params['dir'])) # noqa: E501
if 'resume' in params:
query_params.append(('resume', params['resume'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/worm/domains', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WormDomainsExtended', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_worm_domain(self, worm_domain, worm_domain_id, **kwargs): # noqa: E501
"""update_worm_domain # noqa: E501
Modify a single WORM domain. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_worm_domain(worm_domain, worm_domain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param WormDomain worm_domain: (required)
:param str worm_domain_id: Modify a single WORM domain. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_worm_domain_with_http_info(worm_domain, worm_domain_id, **kwargs) # noqa: E501
else:
(data) = self.update_worm_domain_with_http_info(worm_domain, worm_domain_id, **kwargs) # noqa: E501
return data
def update_worm_domain_with_http_info(self, worm_domain, worm_domain_id, **kwargs): # noqa: E501
"""update_worm_domain # noqa: E501
Modify a single WORM domain. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_worm_domain_with_http_info(worm_domain, worm_domain_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param WormDomain worm_domain: (required)
:param str worm_domain_id: Modify a single WORM domain. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['worm_domain', 'worm_domain_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_worm_domain" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'worm_domain' is set
if ('worm_domain' not in params or
params['worm_domain'] is None):
raise ValueError("Missing the required parameter `worm_domain` when calling `update_worm_domain`") # noqa: E501
# verify the required parameter 'worm_domain_id' is set
if ('worm_domain_id' not in params or
params['worm_domain_id'] is None):
raise ValueError("Missing the required parameter `worm_domain_id` when calling `update_worm_domain`") # noqa: E501
collection_formats = {}
path_params = {}
if 'worm_domain_id' in params:
path_params['WormDomainId'] = params['worm_domain_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'worm_domain' in params:
body_params = params['worm_domain']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/worm/domains/{WormDomainId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_worm_settings(self, worm_settings, **kwargs): # noqa: E501
"""update_worm_settings # noqa: E501
Modify the global WORM settings. All input fields are optional, but one or more must be supplied. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_worm_settings(worm_settings, async_req=True)
>>> result = thread.get()
:param async_req bool
:param WormSettingsExtended worm_settings: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_worm_settings_with_http_info(worm_settings, **kwargs) # noqa: E501
else:
(data) = self.update_worm_settings_with_http_info(worm_settings, **kwargs) # noqa: E501
return data
def update_worm_settings_with_http_info(self, worm_settings, **kwargs): # noqa: E501
"""update_worm_settings # noqa: E501
Modify the global WORM settings. All input fields are optional, but one or more must be supplied. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_worm_settings_with_http_info(worm_settings, async_req=True)
>>> result = thread.get()
:param async_req bool
:param WormSettingsExtended worm_settings: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['worm_settings'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_worm_settings" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'worm_settings' is set
if ('worm_settings' not in params or
params['worm_settings'] is None):
raise ValueError("Missing the required parameter `worm_settings` when calling `update_worm_settings`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'worm_settings' in params:
body_params = params['worm_settings']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/1/worm/settings', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
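# Usage sketch (illustrative; assumes a reachable, configured OneFS endpoint —
# host and credentials live on the ApiClient and are not shown here). It
# exercises the synchronous default and the async_req=True path described in
# the docstrings: the async variant returns the request thread, and
# thread.get() blocks for the result.
def _example_worm_api_usage():
    api = WormApi()
    settings = api.get_worm_settings()              # synchronous by default
    thread = api.list_worm_domains(async_req=True, limit=10)
    domains = thread.get()                          # wait for the async response
    return settings, domains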
| 40.519817 | 175 | 0.616079 | 3138 | 26581 | 4.96399 | 0.069152 | 0.053926 | 0.023111 | 0.027733 | 0.951018 | 0.93882 | 0.925339 | 0.912371 | 0.896642 | 0.88008 | 0 | 0.01988 | 0.29412 | 26581 | 655 | 176 | 40.581679 | 0.810318 | 0.318573 | 0 | 0.738028 | 1 | 0.019718 | 0.220303 | 0.035387 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03662 | false | 0 | 0.011268 | 0 | 0.101408 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
| 4810a6917461b99b8a18e1dbec251d9df3903a9d | 149 | py | Python |
| readfiles.py | msrshahrukh100/Related-Posts-on-towards-light | 5b36c59c7b4860103375e3c78ac260593af7ed2f | ["MIT"] | null | null | null |
| readfiles.py | msrshahrukh100/Related-Posts-on-towards-light | 5b36c59c7b4860103375e3c78ac260593af7ed2f | ["MIT"] | null | null | null |
| readfiles.py | msrshahrukh100/Related-Posts-on-towards-light | 5b36c59c7b4860103375e3c78ac260593af7ed2f | ["MIT"] | null | null | null |
from settings import DIR
import os
def read_posts_from_files():
return [open(os.path.join(DIR,filename)).read() for filename in os.listdir(DIR)]
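# A leak-free variant (sketch): the one-liner above never closes its file
# handles explicitly, relying on garbage collection; a context manager closes
# each file as soon as it is read.
def read_posts_from_files_closing():
    posts = []
    for filename in os.listdir(DIR):
        with open(os.path.join(DIR, filename)) as f:
            posts.append(f.read())
    return posts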
| 21.285714 | 81 | 0.758389 | 25 | 149 | 4.4 | 0.68 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.120805 | 149 | 6 | 82 | 24.833333 | 0.839695 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | true | 0 | 0.5 | 0.25 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 7 |
| 483d79c50b9d3b2e12cfe748f3bd3c6dc975993d | 30009 | py | Python |
| tests/test_turn_based_predator_prey.py | Leonardo767/Abmarl | 9fada5447b09174c6a70b6032b4a8d08b66c4589 | ["Apache-2.0"] | null | null | null |
| tests/test_turn_based_predator_prey.py | Leonardo767/Abmarl | 9fada5447b09174c6a70b6032b4a8d08b66c4589 | ["Apache-2.0"] | null | null | null |
| tests/test_turn_based_predator_prey.py | Leonardo767/Abmarl | 9fada5447b09174c6a70b6032b4a8d08b66c4589 | ["Apache-2.0"] | null | null | null |
import numpy as np
from abmarl.sim.predator_prey import PredatorPreySimulation, Predator, Prey
from abmarl.managers import TurnBasedManager
def test_turn_based_predator_prey_distance():
np.random.seed(24)
predators = [Predator(id=f'predator{i}', attack=1) for i in range(2)]
prey = [Prey(id=f'prey{i}') for i in range(7)]
agents = predators + prey
sim_config = {
'region': 6,
'observation_mode': PredatorPreySimulation.ObservationMode.DISTANCE,
'agents': agents,
}
sim = PredatorPreySimulation.build(sim_config)
sim = TurnBasedManager(sim)
    # A little hackish: we set the agents' positions explicitly so the expected observations below hold
obs = sim.reset()
sim.agents['predator0'].position = np.array([2, 3])
sim.agents['predator1'].position = np.array([0, 1])
sim.agents['prey0'].position = np.array([1, 1])
sim.agents['prey1'].position = np.array([4, 3])
sim.agents['prey2'].position = np.array([4, 3])
sim.agents['prey3'].position = np.array([2, 3])
sim.agents['prey4'].position = np.array([3, 3])
sim.agents['prey5'].position = np.array([3, 1])
sim.agents['prey6'].position = np.array([2, 1])
obs = {'predator0': sim.sim.get_obs('predator0')}
np.testing.assert_array_equal(obs['predator0']['predator1'], np.array([-2, -2, 2]))
np.testing.assert_array_equal(obs['predator0']['prey0'], np.array([-1, -2, 1]))
np.testing.assert_array_equal(obs['predator0']['prey1'], np.array([2, 0, 1]))
np.testing.assert_array_equal(obs['predator0']['prey2'], np.array([2, 0, 1]))
np.testing.assert_array_equal(obs['predator0']['prey3'], np.array([0, 0, 1]))
np.testing.assert_array_equal(obs['predator0']['prey4'], np.array([1, 0, 1]))
np.testing.assert_array_equal(obs['predator0']['prey5'], np.array([1, -2, 1]))
np.testing.assert_array_equal(obs['predator0']['prey6'], np.array([0, -2, 1]))
obs, reward, done, info = sim.step(
{agent_id: {'attack': 1, 'move': np.array([0, 0])} for agent_id in obs}
)
np.testing.assert_array_equal(obs['predator1']['predator0'], np.array([2, 2, 2]))
np.testing.assert_array_equal(obs['predator1']['prey0'], np.array([1, 0, 1]))
np.testing.assert_array_equal(obs['predator1']['prey1'], np.array([4, 2, 1]))
np.testing.assert_array_equal(obs['predator1']['prey2'], np.array([4, 2, 1]))
np.testing.assert_array_equal(obs['predator1']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey4'], np.array([3, 2, 1]))
np.testing.assert_array_equal(obs['predator1']['prey5'], np.array([3, 0, 1]))
np.testing.assert_array_equal(obs['predator1']['prey6'], np.array([2, 0, 1]))
assert reward == {'predator1': 0}
assert done == {'predator1': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'attack': 1, 'move': np.array([0, 0])} for agent_id in obs}
)
np.testing.assert_array_equal(obs['prey0']['predator0'], np.array([1, 2, 2]))
np.testing.assert_array_equal(obs['prey0']['predator1'], np.array([-1, 0, 2]))
np.testing.assert_array_equal(obs['prey0']['prey1'], np.array([3, 2, 1]))
np.testing.assert_array_equal(obs['prey0']['prey2'], np.array([3, 2, 1]))
np.testing.assert_array_equal(obs['prey0']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey0']['prey4'], np.array([2, 2, 1]))
np.testing.assert_array_equal(obs['prey0']['prey5'], np.array([2, 0, 1]))
np.testing.assert_array_equal(obs['prey0']['prey6'], np.array([1, 0, 1]))
np.testing.assert_array_equal(obs['prey1']['predator0'], np.array([-2, 0, 2]))
np.testing.assert_array_equal(obs['prey1']['predator1'], np.array([-4, -2, 2]))
np.testing.assert_array_equal(obs['prey1']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey2'], np.array([0, 0, 1]))
np.testing.assert_array_equal(obs['prey1']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey4'], np.array([-1, 0, 1]))
np.testing.assert_array_equal(obs['prey1']['prey5'], np.array([-1, -2, 1]))
np.testing.assert_array_equal(obs['prey1']['prey6'], np.array([-2, -2, 1]))
assert reward == {'prey0': -36, 'prey1': 0}
assert done == {'prey0': True, 'prey1': False, '__all__': False}
obs, reward, done, info = sim.step({'prey1': np.array([0, -1])})
np.testing.assert_array_equal(obs['prey2']['predator0'], np.array([-2, 0, 2]))
np.testing.assert_array_equal(obs['prey2']['predator1'], np.array([-4, -2, 2]))
np.testing.assert_array_equal(obs['prey2']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey1'], np.array([0, -1, 1]))
np.testing.assert_array_equal(obs['prey2']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey4'], np.array([-1, 0, 1]))
np.testing.assert_array_equal(obs['prey2']['prey5'], np.array([-1, -2, 1]))
np.testing.assert_array_equal(obs['prey2']['prey6'], np.array([-2, -2, 1]))
assert reward == {'prey2': 0}
assert done == {'prey2': False, '__all__': False}
obs, reward, done, info = sim.step({agent_id: np.array([1, 1]) for agent_id in obs})
np.testing.assert_array_equal(obs['prey3']['predator0'], np.array([0, 0, 2]))
np.testing.assert_array_equal(obs['prey3']['predator1'], np.array([-2, -2, 2]))
np.testing.assert_array_equal(obs['prey3']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey3']['prey1'], np.array([2, -1, 1]))
np.testing.assert_array_equal(obs['prey3']['prey2'], np.array([3, 1, 1]))
np.testing.assert_array_equal(obs['prey3']['prey4'], np.array([1, 0, 1]))
np.testing.assert_array_equal(obs['prey3']['prey5'], np.array([1, -2, 1]))
np.testing.assert_array_equal(obs['prey3']['prey6'], np.array([0, -2, 1]))
np.testing.assert_array_equal(obs['prey4']['predator0'], np.array([-1, 0, 2]))
np.testing.assert_array_equal(obs['prey4']['predator1'], np.array([-3, -2, 2]))
np.testing.assert_array_equal(obs['prey4']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey4']['prey1'], np.array([1, -1, 1]))
np.testing.assert_array_equal(obs['prey4']['prey2'], np.array([2, 1, 1]))
    np.testing.assert_array_equal(obs['prey4']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey4']['prey5'], np.array([0, -2, 1]))
np.testing.assert_array_equal(obs['prey4']['prey6'], np.array([-1, -2, 1]))
assert reward == {'prey3': -36, 'prey4': 0}
assert done == {'prey3': True, 'prey4': False, '__all__': False}
obs, reward, done, info = sim.step({'prey4': np.array([-1, 1])})
np.testing.assert_array_equal(obs['prey5']['predator0'], np.array([-1, 2, 2]))
np.testing.assert_array_equal(obs['prey5']['predator1'], np.array([-3, 0, 2]))
np.testing.assert_array_equal(obs['prey5']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey5']['prey1'], np.array([1, 1, 1]))
np.testing.assert_array_equal(obs['prey5']['prey2'], np.array([2, 3, 1]))
np.testing.assert_array_equal(obs['prey5']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey5']['prey4'], np.array([-1, 3, 1]))
np.testing.assert_array_equal(obs['prey5']['prey6'], np.array([-1, 0, 1]))
assert reward == {'prey5': 0}
assert done == {'prey5': False, '__all__': False}
obs, reward, done, info = sim.step({agent_id: np.array([1, 1]) for agent_id in obs})
np.testing.assert_array_equal(obs['prey6']['predator0'], np.array([0, 2, 2]))
np.testing.assert_array_equal(obs['prey6']['predator1'], np.array([-2, 0, 2]))
np.testing.assert_array_equal(obs['prey6']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey6']['prey1'], np.array([2, 1, 1]))
np.testing.assert_array_equal(obs['prey6']['prey2'], np.array([3, 3, 1]))
np.testing.assert_array_equal(obs['prey6']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey6']['prey4'], np.array([0, 3, 1]))
np.testing.assert_array_equal(obs['prey6']['prey5'], np.array([2, 1, 1]))
assert reward == {'prey6': 0}
assert done == {'prey6': False, '__all__': False}
obs, reward, done, info = sim.step({agent_id: np.array([0, 0]) for agent_id in obs})
np.testing.assert_array_equal(obs['predator0']['predator1'], np.array([-2, -2, 2]))
np.testing.assert_array_equal(obs['predator0']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey1'], np.array([2, -1, 1]))
np.testing.assert_array_equal(obs['predator0']['prey2'], np.array([3, 1, 1]))
np.testing.assert_array_equal(obs['predator0']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey4'], np.array([0, 1, 1]))
np.testing.assert_array_equal(obs['predator0']['prey5'], np.array([2, -1, 1]))
np.testing.assert_array_equal(obs['predator0']['prey6'], np.array([0, -2, 1]))
    assert reward == {'predator0': 36}
assert done == {'predator0': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'attack': 1, 'move': np.array([0, 0])} for agent_id in obs}
)
np.testing.assert_array_equal(obs['predator1']['predator0'], np.array([2, 2, 2]))
np.testing.assert_array_equal(obs['predator1']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey1'], np.array([4, 1, 1]))
np.testing.assert_array_equal(obs['predator1']['prey2'], np.array([5, 3, 1]))
np.testing.assert_array_equal(obs['predator1']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey5'], np.array([4, 1, 1]))
np.testing.assert_array_equal(obs['predator1']['prey6'], np.array([2, 0, 1]))
assert reward == {'predator1': 36}
assert done == {'predator1': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'attack': 0, 'move': np.array([1, 0])} for agent_id in obs}
)
np.testing.assert_array_equal(obs['prey1']['predator0'], np.array([-2, 1, 2]))
np.testing.assert_array_equal(obs['prey1']['predator1'], np.array([-3, -1, 2]))
np.testing.assert_array_equal(obs['prey1']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey2'], np.array([1, 2, 1]))
np.testing.assert_array_equal(obs['prey1']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey5'], np.array([0, 0, 1]))
np.testing.assert_array_equal(obs['prey1']['prey6'], np.array([-2, -1, 1]))
assert reward == {'prey1': -1}
assert done == {'prey1': False, '__all__': False}
obs, reward, done, info = sim.step({agent_id: np.array([-1, -1]) for agent_id in obs})
np.testing.assert_array_equal(obs['prey2']['predator0'], np.array([-3, -1, 2]))
np.testing.assert_array_equal(obs['prey2']['predator1'], np.array([-4, -3, 2]))
np.testing.assert_array_equal(obs['prey2']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey1'], np.array([-2, -3, 1]))
np.testing.assert_array_equal(obs['prey2']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey5'], np.array([-1, -2, 1]))
np.testing.assert_array_equal(obs['prey2']['prey6'], np.array([-3, -3, 1]))
assert reward == {'prey2': -1}
assert done == {'prey2': False, '__all__': False}
obs, reward, done, info = sim.step({agent_id: np.array([-1, 0]) for agent_id in obs})
np.testing.assert_array_equal(obs['prey4']['predator0'], np.array([0, -1, 2]))
np.testing.assert_array_equal(obs['prey4']['predator1'], np.array([-1, -3, 2]))
np.testing.assert_array_equal(obs['prey4']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey4']['prey1'], np.array([1, -3, 1]))
np.testing.assert_array_equal(obs['prey4']['prey2'], np.array([2, 0, 1]))
np.testing.assert_array_equal(obs['prey4']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey4']['prey5'], np.array([2, -2, 1]))
np.testing.assert_array_equal(obs['prey4']['prey6'], np.array([0, -3, 1]))
np.testing.assert_array_equal(obs['prey5']['predator0'], np.array([-2, 1, 2]))
np.testing.assert_array_equal(obs['prey5']['predator1'], np.array([-3, -1, 2]))
np.testing.assert_array_equal(obs['prey5']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey5']['prey1'], np.array([-1, -1, 1]))
np.testing.assert_array_equal(obs['prey5']['prey2'], np.array([0, 2, 1]))
np.testing.assert_array_equal(obs['prey5']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey5']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey5']['prey6'], np.array([-2, -1, 1]))
assert reward == {'prey4': -37, 'prey5': -1}
assert done == {'prey4': True, 'prey5': False, '__all__': False}
obs, reward, done, info = sim.step({'prey5': np.array([-1, 0])})
np.testing.assert_array_equal(obs['prey6']['predator0'], np.array([0, 2, 2]))
np.testing.assert_array_equal(obs['prey6']['predator1'], np.array([-1, 0, 2]))
np.testing.assert_array_equal(obs['prey6']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey6']['prey1'], np.array([1, 0, 1]))
np.testing.assert_array_equal(obs['prey6']['prey2'], np.array([2, 3, 1]))
np.testing.assert_array_equal(obs['prey6']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey6']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey6']['prey5'], np.array([1, 1, 1]))
assert reward == {'prey6': 0}
assert done == {'prey6': False, '__all__': False}
obs, reward, done, info = sim.step({agent_id: np.array([0, -1]) for agent_id in obs})
np.testing.assert_array_equal(obs['predator0']['predator1'], np.array([-1, -2, 2]))
np.testing.assert_array_equal(obs['predator0']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey1'], np.array([1, -2, 1]))
np.testing.assert_array_equal(obs['predator0']['prey2'], np.array([2, 1, 1]))
np.testing.assert_array_equal(obs['predator0']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey5'], np.array([1, -1, 1]))
np.testing.assert_array_equal(obs['predator0']['prey6'], np.array([0, -3, 1]))
assert reward == {'predator0': 36}
assert done == {'predator0': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'attack': 1, 'move': np.array([0, 0])} for agent_id in obs}
)
np.testing.assert_array_equal(obs['predator1']['predator0'], np.array([1, 2, 2]))
np.testing.assert_array_equal(obs['predator1']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey1'], np.array([2, 0, 1]))
np.testing.assert_array_equal(obs['predator1']['prey2'], np.array([3, 3, 1]))
np.testing.assert_array_equal(obs['predator1']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey5'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey6'], np.array([1, -1, 1]))
assert reward == {'predator1': -1}
assert done == {'predator1': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'attack': 1, 'move': np.array([0, 0])} for agent_id in obs}
)
np.testing.assert_array_equal(obs['prey1']['predator0'], np.array([-1, 2, 2]))
np.testing.assert_array_equal(obs['prey1']['predator1'], np.array([-2, 0, 2]))
np.testing.assert_array_equal(obs['prey1']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey2'], np.array([1, 3, 1]))
np.testing.assert_array_equal(obs['prey1']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey5'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey6'], np.array([0, 0, 0]))
assert reward == {'prey1': -1}
assert done == {'prey1': False, '__all__': False}
obs, reward, done, info = sim.step({agent_id: np.array([-1, 0]) for agent_id in obs})
np.testing.assert_array_equal(obs['prey2']['predator0'], np.array([-2, -1, 2]))
np.testing.assert_array_equal(obs['prey2']['predator1'], np.array([-3, -3, 2]))
np.testing.assert_array_equal(obs['prey2']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey1'], np.array([-2, -3, 1]))
np.testing.assert_array_equal(obs['prey2']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey5'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey6'], np.array([0, 0, 0]))
assert reward == {'prey2': -1}
assert done == {'prey2': False, '__all__': False}
obs, reward, done, info = sim.step({agent_id: np.array([-1, 0]) for agent_id in obs})
np.testing.assert_array_equal(obs['prey5']['predator0'], np.array([-1, 1, 2]))
np.testing.assert_array_equal(obs['prey5']['predator1'], np.array([-2, -1, 2]))
np.testing.assert_array_equal(obs['prey5']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey5']['prey1'], np.array([-1, -1, 1]))
np.testing.assert_array_equal(obs['prey5']['prey2'], np.array([0, 2, 1]))
np.testing.assert_array_equal(obs['prey5']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey5']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey5']['prey6'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey6']['predator0'], np.array([0, 3, 2]))
np.testing.assert_array_equal(obs['prey6']['predator1'], np.array([-1, 1, 2]))
np.testing.assert_array_equal(obs['prey6']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey6']['prey1'], np.array([0, 1, 1]))
np.testing.assert_array_equal(obs['prey6']['prey2'], np.array([1, 4, 1]))
np.testing.assert_array_equal(obs['prey6']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey6']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey6']['prey5'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['predator1'], np.array([-1, -2, 2]))
np.testing.assert_array_equal(obs['predator0']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey1'], np.array([0, -2, 1]))
np.testing.assert_array_equal(obs['predator0']['prey2'], np.array([1, 1, 1]))
np.testing.assert_array_equal(obs['predator0']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey5'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey6'], np.array([0, 0, 0]))
assert reward == {'prey5': -37, 'prey6': -37, 'predator0': 36}
assert done == {'prey5': True, 'prey6': True, 'predator0': False, '__all__': False}
obs, reward, done, info = sim.step({'predator0': {'attack': 1, 'move': np.array([0, 0])}})
np.testing.assert_array_equal(obs['predator1']['predator0'], np.array([1, 2, 2]))
np.testing.assert_array_equal(obs['predator1']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey1'], np.array([1, 0, 1]))
np.testing.assert_array_equal(obs['predator1']['prey2'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey5'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey6'], np.array([0, 0, 0]))
assert reward == {'predator1': 36}
assert done == {'predator1': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'attack': 1, 'move': np.array([0, 0])} for agent_id in obs}
)
np.testing.assert_array_equal(obs['prey1']['predator0'], np.array([0, 2, 2]))
np.testing.assert_array_equal(obs['prey1']['predator1'], np.array([-1, 0, 2]))
np.testing.assert_array_equal(obs['prey1']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey2'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey5'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey6'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['predator0'], np.array([-1, -1, 2]))
np.testing.assert_array_equal(obs['prey2']['predator1'], np.array([-2, -3, 2]))
np.testing.assert_array_equal(obs['prey2']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey1'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey5'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey6'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['predator1'], np.array([-1, -2, 2]))
np.testing.assert_array_equal(obs['predator0']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey1'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey2'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey5'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey6'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['predator0'], np.array([1, 2, 2]))
np.testing.assert_array_equal(obs['predator1']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey1'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey2'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey5'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey6'], np.array([0, 0, 0]))
assert reward == {'prey1': -37, 'prey2': -37, 'predator0': 36, 'predator1': 36}
assert done == {
'prey1': True, 'prey2': True, 'predator0': False, 'predator1': False, '__all__': True
}
def test_turn_based_predator_prey_grid():
np.random.seed(24)
predators = [Predator(id=f'predator{i}', attack=1, view=0) for i in range(2)]
prey = [Prey(id=f'prey{i}', view=0) for i in range(7)]
agents = predators + prey
sim_config = {
'region': 6,
'observation_mode': PredatorPreySimulation.ObservationMode.GRID,
'agents': agents,
}
sim = PredatorPreySimulation.build(sim_config)
sim = TurnBasedManager(sim)
    # A little hackish: we set the agents' positions explicitly so the expected observations below hold
obs = sim.reset()
sim.agents['predator0'].position = np.array([2, 3])
sim.agents['predator1'].position = np.array([0, 1])
sim.agents['prey0'].position = np.array([1, 1])
sim.agents['prey1'].position = np.array([4, 3])
sim.agents['prey2'].position = np.array([4, 3])
sim.agents['prey3'].position = np.array([2, 3])
sim.agents['prey4'].position = np.array([3, 3])
sim.agents['prey5'].position = np.array([3, 1])
sim.agents['prey6'].position = np.array([2, 1])
obs = {'predator0': sim.sim.get_obs('predator0')}
assert len(obs) == 1 and 'predator0' in obs
obs, reward, done, info = sim.step(
{agent_id: {'attack': 1, 'move': np.array([0, 0])} for agent_id in obs}
)
assert len(obs) == 1 and 'predator1' in obs
assert reward == {'predator1': 0}
assert done == {'predator1': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'attack': 1, 'move': np.array([0, 0])} for agent_id in obs}
)
assert len(obs) == 2 and 'prey0' in obs and 'prey1' in obs
assert reward == {'prey0': -36, 'prey1': 0}
assert done == {'prey0': True, 'prey1': False, '__all__': False}
obs, reward, done, info = sim.step(
{'prey1': {'move': np.array([0, -1]), 'harvest': 0}}
)
assert len(obs) == 1 and 'prey2' in obs
assert reward == {'prey2': 0}
assert done == {'prey2': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'move': np.array([1, 1]), 'harvest': 0} for agent_id in obs}
)
assert len(obs) == 2 and 'prey3' in obs and 'prey4' in obs
assert reward == {'prey3': -36, 'prey4': 0}
assert done == {'prey3': True, 'prey4': False, '__all__': False}
    obs, reward, done, info = sim.step(
        {'prey4': {'move': np.array([-1, 1]), 'harvest': 0}}
    )
assert len(obs) == 1 and 'prey5' in obs
assert reward == {'prey5': 0}
assert done == {'prey5': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'move': np.array([1, 1]), 'harvest': 0} for agent_id in obs}
)
assert len(obs) == 1 and 'prey6' in obs
assert reward == {'prey6': 0}
assert done == {'prey6': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'move': np.array([0, 0]), 'harvest': 0} for agent_id in obs}
)
assert len(obs) == 1 and 'predator0' in obs
    assert reward == {'predator0': 36}
assert done == {'predator0': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'attack': 1, 'move': np.array([0, 0])} for agent_id in obs}
)
assert len(obs) == 1 and 'predator1' in obs
assert reward == {'predator1': 36}
assert done == {'predator1': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'attack': 0, 'move': np.array([1, 0])} for agent_id in obs}
)
assert len(obs) == 1 and 'prey1' in obs
assert reward == {'prey1': -1}
assert done == {'prey1': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'move': np.array([-1, -1]), 'harvest': 0} for agent_id in obs}
)
assert len(obs) == 1 and 'prey2' in obs
assert reward == {'prey2': -1}
assert done == {'prey2': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'move': np.array([-1, 0]), 'harvest': 0} for agent_id in obs}
)
    assert len(obs) == 2 and 'prey4' in obs and 'prey5' in obs
assert reward == {'prey4': -37, 'prey5': -1}
assert done == {'prey4': True, 'prey5': False, '__all__': False}
obs, reward, done, info = sim.step(
        {'prey5': {'move': np.array([-1, 0]), 'harvest': 0}}
)
assert len(obs) == 1 and 'prey6' in obs
assert reward == {'prey6': 0}
assert done == {'prey6': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'move': np.array([0, -1]), 'harvest': 0} for agent_id in obs}
)
assert len(obs) == 1 and 'predator0' in obs
assert reward == {'predator0': 36}
assert done == {'predator0': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'attack': 1, 'move': np.array([0, 0])} for agent_id in obs}
)
assert len(obs) == 1 and 'predator1' in obs
assert reward == {'predator1': -1}
assert done == {'predator1': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'attack': 1, 'move': np.array([0, 0])} for agent_id in obs}
)
assert len(obs) == 1 and 'prey1' in obs
assert reward == {'prey1': -1}
assert done == {'prey1': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'move': np.array([-1, 0]), 'harvest': 0} for agent_id in obs}
)
assert len(obs) == 1 and 'prey2' in obs
assert reward == {'prey2': -1}
assert done == {'prey2': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'move': np.array([-1, 0]), 'harvest': 0} for agent_id in obs}
)
assert len(obs) == 3 and 'prey5' in obs and 'prey6' in obs and 'predator0' in obs
assert reward == {'prey5': -37, 'prey6': -37, 'predator0': 36}
assert done == {'prey5': True, 'prey6': True, 'predator0': False, '__all__': False}
obs, reward, done, info = sim.step({'predator0': {'attack': 1, 'move': np.array([0, 0])}})
assert len(obs) == 1 and 'predator1' in obs
assert reward == {'predator1': 36}
assert done == {'predator1': False, '__all__': False}
obs, reward, done, info = sim.step(
{agent_id: {'attack': 1, 'move': np.array([0, 0])} for agent_id in obs}
)
assert len(obs) == 4
assert 'prey1' in obs
assert 'prey2' in obs
assert 'predator0' in obs
assert 'predator1' in obs
assert reward == {'prey1': -37, 'prey2': -37, 'predator0': 36, 'predator1': 36}
assert done == {
'prey1': True, 'prey2': True, 'predator0': False, 'predator1': False, '__all__': True
}
| 58.269903
| 94
| 0.618148
| 4,557
| 30,009
| 3.92122
| 0.018872
| 0.109687
| 0.188035
| 0.250714
| 0.980581
| 0.97879
| 0.972634
| 0.97006
| 0.963904
| 0.962841
| 0
| 0.060693
| 0.15392
| 30,009
| 514
| 95
| 58.383268
| 0.643088
| 0.004365
| 0
| 0.615054
| 0
| 0
| 0.148519
| 0
| 0
| 0
| 0
| 0
| 0.696774
| 1
| 0.004301
| false
| 0
| 0.006452
| 0
| 0.010753
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
b5110926a50d11ac8624ea7ec0f0e16dd14c8478
| 7
|
py
|
Python
|
package-test/spam-p1/spam/package2.py
|
plant99/import-system-talk-resources
|
a48620ee8e6eda5c3a1c09a708804770781d4bea
|
[
"MIT"
] | 1
|
2020-08-20T16:37:49.000Z
|
2020-08-20T16:37:49.000Z
|
package-test/spam1/package2.py
|
plant99/import-system-talk-resources
|
a48620ee8e6eda5c3a1c09a708804770781d4bea
|
[
"MIT"
] | null | null | null |
package-test/spam1/package2.py
|
plant99/import-system-talk-resources
|
a48620ee8e6eda5c3a1c09a708804770781d4bea
|
[
"MIT"
] | 1
|
2020-08-20T16:38:22.000Z
|
2020-08-20T16:38:22.000Z
|
x = 4
| 2.333333
| 5
| 0.285714
| 2
| 7
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 0.571429
| 7
| 2
| 6
| 3.5
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
82ec4bc4f9c2681c4f0439da95809a7b14e1d12e
| 15,940
|
py
|
Python
|
randomras/smoothagg.py
|
quentinll/pertrenderer
|
d292ed4f09d49a957fba9d2f8bdd9d5c66930261
|
[
"BSD-2-Clause"
] | 1
|
2022-02-03T08:31:40.000Z
|
2022-02-03T08:31:40.000Z
|
randomras/smoothagg.py
|
quentinll/pertrenderer
|
d292ed4f09d49a957fba9d2f8bdd9d5c66930261
|
[
"BSD-2-Clause"
] | null | null | null |
randomras/smoothagg.py
|
quentinll/pertrenderer
|
d292ed4f09d49a957fba9d2f8bdd9d5c66930261
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Inspired from Pytorch3D.
"""
import torch
from torch.nn import Module
from torch.autograd import Function
class randomArgmax(Function):
@staticmethod
    def forward(ctx, z, nb_samples=1, noise_intensity=1e-1, noise_type="gaussian", fixed_noise=False):
        device = z.device
        z_size = z.size()
        # encode the noise-type string as a tensor tag so it can be saved for the backward pass
        noise_dict = {"gaussian": torch.tensor(0), "gumbel": torch.tensor(1), "cauchy": torch.tensor(2), "uniform": torch.tensor(3)}
        noise_type = noise_dict[noise_type]
if fixed_noise:
torch.manual_seed(1)
if noise_type == noise_dict["gaussian"]:
noise = torch.normal(mean = torch.zeros((nb_samples,z_size[0],z_size[1],z_size[2],z_size[3]),device=device),std = 1. )
elif noise_type == noise_dict["gumbel"]:
m = torch.distributions.gumbel.Gumbel(torch.tensor([0.]).to(device=device), torch.tensor([1.]).to(device=device))
noise = m.sample((nb_samples,z_size[0],z_size[1],z_size[2],z_size[3])).squeeze(-1)
elif noise_type == noise_dict["cauchy"]:
m = torch.distributions.cauchy.Cauchy(torch.tensor([0.]).to(device=device), torch.tensor([1.]).to(device=device))
noise = torch.clamp(m.sample((nb_samples,z_size[0],z_size[1],z_size[2],z_size[3])).squeeze(-1),min=-1e7, max=1e7)
elif noise_type == noise_dict["uniform"]:
m = torch.distributions.uniform.Uniform(torch.tensor([-0.5]).to(device=device), torch.tensor([0.5]).to(device=device))
noise = m.sample((nb_samples,z_size[0],z_size[1],z_size[2],z_size[3])).squeeze(-1)
        else:
            # fail fast: falling through would leave `noise` undefined
            raise NotImplementedError("noise type not implemented")
z_pert = z + noise_intensity*noise
_, indices = torch.max(z_pert, dim =-1, keepdim=True)
weights = torch.zeros(z_pert.size(), device = device)
weights.scatter_(-1, indices, 1)
_, indices = torch.max(z, dim =-1, keepdim=True)
vr_var = torch.zeros(z.size(), device = device)
vr_var.scatter_(-1, indices, 1) #used during backward to reduce variance of gradient estimator
ctx.save_for_backward(weights,noise,noise_intensity,vr_var,noise_type)
weight = weights.mean(dim = 0)
return weight
@staticmethod
def backward(ctx, grad_l):
grad_z = None
grad_gamma= None
weights, noise, noise_intensity,vr_var, noise_type = ctx.saved_tensors
noise_dict ={"gaussian": torch.tensor(0),"gumbel":torch.tensor(1),"cauchy":torch.tensor(2), "uniform": torch.tensor(3)}
if noise_type == noise_dict["gaussian"]:
grad_z = torch.matmul(grad_l.repeat(noise.size()[0],1,1,1,1).unsqueeze(-2),weights.unsqueeze(-1)-vr_var.unsqueeze(0).repeat(weights.size()[0],1,1,1,1).unsqueeze(-1))
grad_z = torch.matmul(grad_z,noise.unsqueeze(-2))/noise_intensity
grad_z = grad_z.squeeze(-2)
grad_gamma = (weights-vr_var.unsqueeze(0).repeat(weights.size()[0],1,1,1,1))*(torch.square(torch.norm(noise,dim=-1,keepdim=True))- 1.)/noise_intensity
grad_gamma = grad_l*grad_gamma
grad_gamma = grad_gamma.sum(dim=(1,2,3,4))
elif noise_type == noise_dict["cauchy"]:
grad_z = torch.matmul(grad_l.repeat(noise.size()[0],1,1,1,1).unsqueeze(-2),weights.unsqueeze(-1)-vr_var.unsqueeze(0).repeat(weights.size()[0],1,1,1,1).unsqueeze(-1))
grad_z = torch.matmul(grad_z,(2*noise/(1.+torch.square(noise))).unsqueeze(-2))/noise_intensity #need to replace with grad of density
grad_z = grad_z.squeeze(-2)
grad_gamma = (weights-vr_var.unsqueeze(0).repeat(weights.size()[0],1,1,1,1))*(torch.matmul((2*noise/(1.+torch.square(noise))).unsqueeze(-2),noise.unsqueeze(-1)).squeeze(-1)- 1.)/noise_intensity
grad_gamma = grad_l*grad_gamma
grad_gamma = grad_gamma.sum(dim=(1,2,3,4))
elif noise_type == noise_dict["uniform"]:
print("noise_type not implemented")
elif noise_type == noise_dict["gumbel"]:
print("noise_type not implemented")
else:
print("noise_type not implemented")
grad_z = grad_z.mean(dim=0)
grad_gamma = grad_gamma.mean(dim=0)
return grad_z, None, grad_gamma, None, None
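# --- Illustrative usage of randomArgmax (added for exposition) ---
# A minimal sketch assuming a 4-D score tensor (batch, H, W, K); the shapes
# and values below are hypothetical:
#
#     z = torch.randn(2, 8, 8, 5, requires_grad=True)
#     gamma = torch.tensor(1e-1, requires_grad=True)  # noise_intensity must be a tensor here
#     w = randomArgmax.apply(z, 16, gamma, "gaussian", False)  # soft one-hot over the last dim
#     w.sum().backward()  # gradients flow back to z and gamma via the noise trick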
class randomArgmax_wovr(Function):
"""
perturbed argmax without variance reduction
"""
@staticmethod
    def forward(ctx, z, nb_samples=1, noise_intensity=1e-1, noise_type="gaussian", fixed_noise=False):
        device = z.device
        z_size = z.size()
        # encode the noise-type string as a tensor tag so it can be saved for the backward pass
        noise_dict = {"gaussian": torch.tensor(0), "gumbel": torch.tensor(1), "cauchy": torch.tensor(2), "uniform": torch.tensor(3)}
        noise_type = noise_dict[noise_type]
if fixed_noise:
torch.manual_seed(1)
if noise_type == noise_dict["gaussian"]:
noise = torch.normal(mean = torch.zeros((nb_samples,z_size[0],z_size[1],z_size[2],z_size[3]),device=device),std = 1. )
elif noise_type == noise_dict["gumbel"]:
m = torch.distributions.gumbel.Gumbel(torch.tensor([0.]).to(device=device), torch.tensor([1.]).to(device=device))
noise = m.sample((nb_samples,z_size[0],z_size[1],z_size[2],z_size[3])).squeeze(-1)
elif noise_type == noise_dict["cauchy"]:
m = torch.distributions.cauchy.Cauchy(torch.tensor([0.]).to(device=device), torch.tensor([1.]).to(device=device))
noise = torch.clamp(m.sample((nb_samples,z_size[0],z_size[1],z_size[2],z_size[3])).squeeze(-1),min=-1e7, max=1e7)
elif noise_type == noise_dict["uniform"]:
m = torch.distributions.uniform.Uniform(torch.tensor([-0.5]).to(device=device), torch.tensor([0.5]).to(device=device))
noise = m.sample((nb_samples,z_size[0],z_size[1],z_size[2],z_size[3])).squeeze(-1)
        else:
            # fail fast: falling through would leave `noise` undefined
            raise NotImplementedError("noise type not implemented")
z_pert = z + noise_intensity*noise
_, indices = torch.max(z_pert, dim =-1, keepdim=True)
weights = torch.zeros(z_pert.size(), device = device)
weights.scatter_(-1, indices, 1)
_, indices = torch.max(z, dim =-1, keepdim=True)
vr_var = torch.zeros(z.size(), device = device)
vr_var.scatter_(-1, indices, 1) #used during backward to reduce variance of gradient estimator
ctx.save_for_backward(weights,noise,noise_intensity,vr_var,noise_type)
weight = weights.mean(dim = 0)
return weight
@staticmethod
def backward(ctx, grad_l):
grad_z = None
grad_gamma= None
weights, noise, noise_intensity,vr_var, noise_type = ctx.saved_tensors
noise_dict ={"gaussian": torch.tensor(0),"gumbel":torch.tensor(1),"cauchy":torch.tensor(2), "uniform": torch.tensor(3)}
if noise_type == noise_dict["gaussian"]:
grad_z = torch.matmul(grad_l.repeat(noise.size()[0],1,1,1,1).unsqueeze(-2),weights.unsqueeze(-1))
grad_z = torch.matmul(grad_z,noise.unsqueeze(-2))/noise_intensity
grad_z = grad_z.squeeze(-2)
grad_gamma = (weights)*(torch.square(torch.norm(noise,dim=-1,keepdim=True))- 1.)/noise_intensity
grad_gamma = grad_l*grad_gamma
grad_gamma = grad_gamma.sum(dim=(1,2,3,4))
elif noise_type == noise_dict["cauchy"]:
grad_z = torch.matmul(grad_l.repeat(noise.size()[0],1,1,1,1).unsqueeze(-2),weights.unsqueeze(-1)-vr_var.unsqueeze(0).repeat(weights.size()[0],1,1,1,1).unsqueeze(-1))
grad_z = torch.matmul(grad_z,(2*noise/(1.+torch.square(noise))).unsqueeze(-2))/noise_intensity
grad_z = grad_z.squeeze(-2)
grad_gamma = (weights-vr_var.unsqueeze(0).repeat(weights.size()[0],1,1,1,1))*(torch.matmul((2*noise/(1.+torch.square(noise))).unsqueeze(-2),noise.unsqueeze(-1)).squeeze(-1)- 1.)/noise_intensity
grad_gamma = grad_l*grad_gamma
grad_gamma = grad_gamma.sum(dim=(1,2,3,4))
elif noise_type == noise_dict["uniform"]:
print("noise_type not implemented")
elif noise_type == noise_dict["gumbel"]:
print("noise_type not implemented")
else:
print("noise_type not implemented")
grad_z = grad_z.mean(dim=0)
grad_gamma = grad_gamma.mean(dim=0)
#print("grad gamma", grad_gamma)
return grad_z, None, grad_gamma, None, None
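# Note: as written above, randomArgmax_wovr drops the variance-reduction
# baseline only in the "gaussian" branch (weights are used directly), while
# its "cauchy" branch is identical to randomArgmax's.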
class SmoothAggBase(Module):
def __init__(self,
gamma,
alpha,
eps,
nb_samples=1):
super(SmoothAggBase,self).__init__()
self.gamma = torch.tensor(gamma,requires_grad= True)
self.alpha = torch.tensor(alpha,requires_grad=True)
self.nb_samples = nb_samples
self.eps = eps # Weight for background color
def update_smoothing(self, gamma = 4e-2, alpha = 1.):
self.gamma = torch.tensor(gamma,requires_grad= True)
self.alpha = torch.tensor(alpha,requires_grad=True)
def update_nb_samples(self, nb_samples):
self.nb_samples = nb_samples
class SoftAgg(SmoothAggBase):
def __init__(self,
gamma = 4e-2,
alpha = 1.,
eps= 1e-10):
super(SoftAgg,self).__init__(gamma,alpha,eps)
def aggregate(self, zbuf,zfar,znear,prob_map,mask):
device =zbuf.device
z_inv = (zfar - zbuf) / (zfar - znear) * mask
z_inv_max = torch.max(z_inv, dim=-1).values[..., None].clamp(min=self.eps)
log_prob = log_corrected.apply(prob_map)
gal = self.gamma/self.alpha
z_map = (prod_corrected.apply(gal,log_prob)+ z_inv-z_inv_max)
z_map =torch.cat((z_map,(torch.ones((z_map.size()[0],z_map.size()[1],z_map.size()[2],1),device=device)*self.eps -z_inv_max)),dim=-1)
weights = torch.softmax(prod_corrected.apply(1./self.gamma,z_map),dim=-1)
return weights
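# SoftAgg computes, per pixel, a tempered softmax over the K candidate faces
# plus a background slot; schematically (illustrative notation, not code):
#
#     w_k = softmax_k( ((gamma / alpha) * log p_k + zinv_k - zinv_max) / gamma )
#
# where p_k is the face probability map and zinv_k the normalized inverse
# depth. The Gaussian/Cauchy/Uniform variants below replace the softmax with
# the perturbed argmax defined above.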
class GaussianAgg(SmoothAggBase):
def __init__(self,
nb_samples=16,
gamma = 4e-2,
alpha = 1.,
eps= 1e-10,
fixed_noise=False):
super(GaussianAgg,self).__init__(gamma,alpha,eps,nb_samples)
self.fixed_noise = fixed_noise
def aggregate(self,zbuf,zfar,znear,prob_map,mask):
device =zbuf.device
z_inv = (zfar - zbuf) / (zfar - znear) * mask
z_inv_max = torch.max(z_inv, dim=-1).values[..., None].clamp(min=self.eps)
log_prob = log_corrected.apply(prob_map)
z_map = (prod_corrected.apply(self.gamma/self.alpha,log_prob)+ z_inv-z_inv_max)
z_map =torch.cat((z_map,torch.ones((z_map.size()[0],z_map.size()[1],z_map.size()[2],1),device=device)*self.eps-z_inv_max ),dim=-1)
randomarg = randomArgmax.apply
randomax = randomarg(z_map, self.nb_samples, self.gamma, "gaussian", self.fixed_noise)
return randomax
class GaussianAgg_wovr(SmoothAggBase):
def __init__(self,
nb_samples=16,
gamma = 4e-2,
alpha = 1.,
eps= 1e-10,
fixed_noise=False):
super(GaussianAgg_wovr,self).__init__(gamma,alpha,eps,nb_samples)
self.fixed_noise = fixed_noise
def aggregate(self,zbuf,zfar,znear,prob_map,mask):
device =zbuf.device
z_inv = (zfar - zbuf) / (zfar - znear) * mask
z_inv_max = torch.max(z_inv, dim=-1).values[..., None].clamp(min=self.eps)
log_prob = log_corrected.apply(prob_map)
z_map = (prod_corrected.apply(self.gamma/self.alpha,log_prob)+ z_inv-z_inv_max)
z_map =torch.cat((z_map,torch.ones((z_map.size()[0],z_map.size()[1],z_map.size()[2],1),device=device)*self.eps-z_inv_max ),dim=-1)
randomarg = randomArgmax_wovr.apply
randomax = randomarg(z_map, self.nb_samples, self.gamma, "gaussian", self.fixed_noise)
return randomax
class CauchyAgg(SmoothAggBase):
def __init__(self,
nb_samples=16,
gamma = 4e-2,
alpha = 1.,
eps = 1e-10,
fixed_noise=False):
super(CauchyAgg,self).__init__(gamma,alpha,eps,nb_samples)
self.fixed_noise = fixed_noise
def aggregate(self,zbuf,zfar,znear,prob_map,mask):
device =zbuf.device
z_inv = (zfar - zbuf) / (zfar - znear) * mask
z_inv_max = torch.max(z_inv, dim=-1).values[..., None].clamp(min=self.eps)
log_prob = log_corrected.apply(prob_map)
z_map = (prod_corrected.apply(self.gamma/self.alpha,log_prob)+ z_inv-z_inv_max)
z_map =torch.cat((z_map,torch.ones((z_map.size()[0],z_map.size()[1],z_map.size()[2],1),device=device)*self.eps-z_inv_max ),dim=-1)
randomarg = randomArgmax.apply
randomax = randomarg(z_map, self.nb_samples, self.gamma, "cauchy", self.fixed_noise)
return randomax
class UniformAgg(SmoothAggBase):
def __init__(self,
nb_samples=16,
gamma = 4e-2,
alpha = 1.,
eps = 1e-10,
fixed_noise=False):
self.fixed_noise = fixed_noise
super().__init__(gamma,alpha,eps, nb_samples)
def aggregate(self,zbuf,zfar,znear,prob_map,mask):
device =zbuf.device
z_inv = (zfar - zbuf) / (zfar - znear) * mask
z_inv_max = torch.max(z_inv, dim=-1).values[..., None].clamp(min=self.eps)
z_map = ((self.gamma/self.alpha)*log_corrected.apply(prob_map)+ z_inv-z_inv_max)
z_map =torch.cat((z_map,torch.ones((z_map.size()[0],z_map.size()[1],z_map.size()[2],1),device=device)*self.eps-z_inv_max ),dim=-1)
randomarg = randomArgmax.apply
randomax = randomarg(z_map, self.nb_samples, self.gamma, "uniform", self.fixed_noise)
return randomax
class HardAgg():
def __init__(self,eps=1e-10):
self.eps = eps
return
def aggregate(self,zbuf,zfar,znear,prob_map,mask):
device =zbuf.device
z_inv = (zfar - zbuf) / (zfar - znear) * mask
z_inv_max = torch.max(z_inv, dim=-1).values[..., None].clamp(min=self.eps)
z_map = ((1./1e6)*log_corrected.apply(prob_map)+ z_inv-z_inv_max)
z_map =torch.cat((z_map,torch.ones((z_map.size()[0],z_map.size()[1],z_map.size()[2],1),device=device)*self.eps-z_inv_max ),dim=-1)
_, indices = torch.max(z_map, dim =-1, keepdim=True)
weight = torch.zeros(z_map.size(), device = device)
weight.scatter_(-1, indices, 1)
return weight
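# --- Illustrative aggregator selection (hypothetical usage) ---
#
#     agg = GaussianAgg(nb_samples=16, gamma=4e-2)  # smooth, differentiable
#     # agg = HardAgg()                             # hard z-buffer baseline
#     weights = agg.aggregate(zbuf, zfar, znear, prob_map, mask)
#
# All aggregators share the aggregate(zbuf, zfar, znear, prob_map, mask)
# signature, so they can be swapped without touching the renderer.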
class log_corrected(Function):
"""
logarithm whose backward pass returns 0 instead of nan when x is null and backward pass vector is null.
"""
@staticmethod
def forward(ctx,x):
ctx.save_for_backward(x)
return x.log()
@staticmethod
def backward(ctx,grad_l):
grad_log = None
if ctx.needs_input_grad[0]:
(x,) = ctx.saved_tensors
device = x.device
grad_log = torch.ones(x.size(),device= device)/x
grad_log = torch.where(torch.isinf(grad_log), torch.zeros_like(grad_log), grad_log)
grad_log = grad_log*grad_l
return grad_log
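# Why log_corrected matters: with a plain log, x = 0 yields an infinite
# gradient (1/x), and 0 * inf = nan would poison the whole backward pass.
# Illustrative check (assuming the class above):
#
#     x = torch.tensor([0.0, 0.5], requires_grad=True)
#     y = log_corrected.apply(x)
#     y.backward(torch.tensor([0.0, 1.0]))  # x.grad is [0., 2.], not [nan, 2.]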
class prod_corrected(Function):
"""
product whose backward pass returns 0 instead of nan when x is null and y is infty.
"""
@staticmethod
def forward(ctx,x,y):
ctx.save_for_backward(x,y)
return x*y
@staticmethod
def backward(ctx,grad_l):
grad_prod_x = None
grad_prod_y = None
(x,y) = ctx.saved_tensors
if ctx.needs_input_grad[0]:
y = torch.where(torch.isinf(y), torch.zeros_like(y), y)
grad_prod_x = y*grad_l
grad_prod_x = grad_prod_x.nansum()
if ctx.needs_input_grad[1]:
device = x.device
grad_prod_y = x*grad_l
grad_prod_y = torch.where(torch.isnan(grad_prod_y), torch.zeros_like(grad_prod_y), grad_prod_y)
return grad_prod_x, grad_prod_y
| 46.882353
| 203
| 0.619197
| 2,308
| 15,940
| 4.058492
| 0.070191
| 0.018362
| 0.006405
| 0.03459
| 0.877762
| 0.841251
| 0.824811
| 0.813921
| 0.811679
| 0.803779
| 0
| 0.025517
| 0.232936
| 15,940
| 339
| 204
| 47.020649
| 0.740574
| 0.029799
| 0
| 0.760714
| 0
| 0
| 0.030439
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082143
| false
| 0
| 0.010714
| 0
| 0.185714
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d204175b37fc3d10b75add293e5bbf9889ee759c
| 6,756
|
py
|
Python
|
carbondesign/tests/test_radio_button_html.py
|
dozymoe/django-carbondesign
|
34aed0cfdccfa90fcb5bf2bbd347229815f1417b
|
[
"MIT"
] | null | null | null |
carbondesign/tests/test_radio_button_html.py
|
dozymoe/django-carbondesign
|
34aed0cfdccfa90fcb5bf2bbd347229815f1417b
|
[
"MIT"
] | null | null | null |
carbondesign/tests/test_radio_button_html.py
|
dozymoe/django-carbondesign
|
34aed0cfdccfa90fcb5bf2bbd347229815f1417b
|
[
"MIT"
] | null | null | null |
# pylint:disable=missing-module-docstring,missing-class-docstring,missing-function-docstring
from .base import compare_template, SimpleTestCase
class RadioHtmlTest(SimpleTestCase):
maxDiff = None
def test_default(self):
template = """
{% load carbondesign %}
{% Radio form.choice2 exclude="blue" label="Radio button label" %}
"""
expected = """
<fieldset class="bx--fieldset">
<legend class="bx--label">Radio button label</legend>
<div class="bx--form-item">
<div class="bx--radio-button-group">
<div class="bx--radio-button-wrapper">
<input id="id_choice2-1" class="bx--radio-button" type="radio" value="red"
name="choice2" tabindex="0" checked>
<label for="id_choice2-1" class="bx--radio-button__label">
<span class="bx--radio-button__appearance"></span>
<span class="bx--radio-button__label-text">Radio button label</span>
</label>
</div>
<div class="bx--radio-button-wrapper">
<input id="id_choice2-2" class="bx--radio-button" type="radio" value="green"
name="choice2" tabindex="0">
<label for="id_choice2-2" class="bx--radio-button__label">
<span class="bx--radio-button__appearance"></span>
<span class="bx--radio-button__label-text">Radio button label</span>
</label>
</div>
<div class="bx--radio-button-wrapper">
<input id="id_choice2-3" class="bx--radio-button" type="radio" value="blue"
name="choice2" tabindex="0" disabled>
<label for="id_choice2-3" class="bx--radio-button__label">
<span class="bx--radio-button__appearance"></span>
<span class="bx--radio-button__label-text">Radio button label</span>
</label>
</div>
</div>
</div>
</fieldset>
"""
rendered = compare_template(template, expected)
self.assertEqual(*rendered)
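    # The remaining tests follow the same recipe: render a {% Radio %} tag
    # with different options (left, vertical) and diff the output against the
    # expected Carbon Design System markup via compare_template from .base.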
    def test_horizontal_left(self):
template = """
{% load carbondesign %}
{% Radio form.choice2 exclude="blue" label="Radio button label" left=True %}
"""
expected = """
<fieldset class="bx--fieldset">
<legend class="bx--label">Radio button label</legend>
<div class="bx--form-item">
<div class="bx--radio-button-group ">
<div class="bx--radio-button-wrapper bx--radio-button-wrapper--label-left">
<input id="id_choice2-1" class="bx--radio-button" type="radio" value="red"
name="choice2" tabindex="0" checked>
<label for="id_choice2-1" class="bx--radio-button__label">
<span class="bx--radio-button__appearance"></span>
<span class="bx--radio-button__label-text">Radio button label</span>
</label>
</div>
<div class="bx--radio-button-wrapper bx--radio-button-wrapper--label-left">
<input id="id_choice2-2" class="bx--radio-button" type="radio" value="green"
name="choice2" tabindex="0">
<label for="id_choice2-2" class="bx--radio-button__label">
<span class="bx--radio-button__appearance"></span>
<span class="bx--radio-button__label-text">Radio button label</span>
</label>
</div>
<div class="bx--radio-button-wrapper bx--radio-button-wrapper--label-left">
<input id="id_choice2-3" class="bx--radio-button" type="radio" value="blue"
name="choice2" tabindex="0" disabled>
<label for="id_choice2-3" class="bx--radio-button__label">
<span class="bx--radio-button__appearance"></span>
<span class="bx--radio-button__label-text">Radio button label</span>
</label>
</div>
</div>
</div>
</fieldset>
"""
rendered = compare_template(template, expected)
self.assertEqual(*rendered)
def test_vertical(self):
template = """
{% load carbondesign %}
{% Radio form.choice2 exclude="blue" label="Radio button label" vertical=True %}
"""
expected = """
<fieldset class="bx--fieldset">
<legend class="bx--label">Radio button label</legend>
<div class="bx--form-item">
<div class="bx--radio-button-group bx--radio-button-group--vertical">
<div class="bx--radio-button-wrapper">
<input id="id_choice2-1" class="bx--radio-button" type="radio" value="red"
name="choice2" tabindex="0" checked>
<label for="id_choice2-1" class="bx--radio-button__label">
<span class="bx--radio-button__appearance"></span>
<span class="bx--radio-button__label-text">Radio button label</span>
</label>
</div>
<div class="bx--radio-button-wrapper">
<input id="id_choice2-2" class="bx--radio-button" type="radio" value="green"
name="choice2" tabindex="0">
<label for="id_choice2-2" class="bx--radio-button__label">
<span class="bx--radio-button__appearance"></span>
<span class="bx--radio-button__label-text">Radio button label</span>
</label>
</div>
<div class="bx--radio-button-wrapper">
<input id="id_choice2-3" class="bx--radio-button" type="radio" value="blue"
name="choice2" tabindex="0" disabled>
<label for="id_choice2-3" class="bx--radio-button__label">
<span class="bx--radio-button__appearance"></span>
<span class="bx--radio-button__label-text">Radio button label</span>
</label>
</div>
</div>
</div>
</fieldset>
"""
rendered = compare_template(template, expected)
self.assertEqual(*rendered)
def test_vertical_left(self):
template = """
{% load carbondesign %}
{% Radio form.choice2 exclude="blue" label="Radio button label" vertical=True left=True %}
"""
expected = """
<fieldset class="bx--fieldset">
<legend class="bx--label">Radio button label</legend>
<div class="bx--form-item">
<div class="bx--radio-button-group bx--radio-button-group--vertical">
<div class="bx--radio-button-wrapper bx--radio-button-wrapper--label-left">
<input id="id_choice2-1" class="bx--radio-button" type="radio" value="red"
name="choice2" tabindex="0" checked>
<label for="id_choice2-1" class="bx--radio-button__label">
<span class="bx--radio-button__appearance"></span>
<span class="bx--radio-button__label-text">Radio button label</span>
</label>
</div>
<div class="bx--radio-button-wrapper bx--radio-button-wrapper--label-left">
<input id="id_choice2-2" class="bx--radio-button" type="radio" value="green"
name="choice2" tabindex="0">
<label for="id_choice2-2" class="bx--radio-button__label">
<span class="bx--radio-button__appearance"></span>
<span class="bx--radio-button__label-text">Radio button label</span>
</label>
</div>
<div class="bx--radio-button-wrapper bx--radio-button-wrapper--label-left">
<input id="id_choice2-3" class="bx--radio-button" type="radio" value="blue"
name="choice2" tabindex="0" disabled>
<label for="id_choice2-3" class="bx--radio-button__label">
<span class="bx--radio-button__appearance"></span>
<span class="bx--radio-button__label-text">Radio button label</span>
</label>
</div>
</div>
</div>
</fieldset>
"""
rendered = compare_template(template, expected)
self.assertEqual(*rendered)
| 39.741176
| 92
| 0.677768
| 926
| 6,756
| 4.829374
| 0.065875
| 0.226297
| 0.209302
| 0.257603
| 0.957737
| 0.957737
| 0.957737
| 0.957737
| 0.957737
| 0.957737
| 0
| 0.012938
| 0.130551
| 6,756
| 169
| 93
| 39.976331
| 0.748383
| 0.013321
| 0
| 0.932515
| 0
| 0.128834
| 0.877701
| 0.415066
| 0
| 0
| 0
| 0
| 0.02454
| 1
| 0.02454
| false
| 0
| 0.006135
| 0
| 0.042945
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
d208c342388e559f589dcb8bdfbadfbbc77f9025
| 3,446
|
py
|
Python
|
tests/estimator/classifier/ExportedData.py
|
zvizdo/sklearn-porter
|
54b23c94921c0529516d47222043f2af0a1034ab
|
[
"MIT"
] | null | null | null |
tests/estimator/classifier/ExportedData.py
|
zvizdo/sklearn-porter
|
54b23c94921c0529516d47222043f2af0a1034ab
|
[
"MIT"
] | null | null | null |
tests/estimator/classifier/ExportedData.py
|
zvizdo/sklearn-porter
|
54b23c94921c0529516d47222043f2af0a1034ab
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
class ExportedData():
def test_random_features__binary_data__exported(self):
self.load_binary_data()
self._port_estimator(export_data=True)
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
preds, ground_truth = [], []
for _ in range(self.N_RANDOM_FEATURE_SETS):
x = np.random.uniform(amin, amax, self.n_features)
preds.append(self.pred_in_custom(x, export_data=True))
ground_truth.append(self.pred_in_py(x))
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertListEqual(preds, ground_truth)
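    # The tests below repeat this recipe for other datasets: port the
    # estimator with export_data=True, then check that predictions from the
    # ported code match the Python estimator. A sketch of the shared core
    # (hypothetical helper, not part of the original class):
    #
    #     def _check_random_features(self):
    #         amin, amax = np.amin(self.X, axis=0), np.amax(self.X, axis=0)
    #         for _ in range(self.N_RANDOM_FEATURE_SETS):
    #             x = np.random.uniform(amin, amax, self.n_features)
    #             assert self.pred_in_custom(x, export_data=True) == self.pred_in_py(x)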
def test_random_features__iris_data__exported(self):
self.load_iris_data()
self._port_estimator(export_data=True)
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
preds, ground_truth = [], []
for _ in range(self.N_RANDOM_FEATURE_SETS):
x = np.random.uniform(amin, amax, self.n_features)
preds.append(self.pred_in_custom(x, export_data=True))
ground_truth.append(self.pred_in_py(x))
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertListEqual(preds, ground_truth)
def test_random_features__digits_data__exported(self):
self.load_digits_data()
self._port_estimator(export_data=True)
amin = np.amin(self.X, axis=0)
amax = np.amax(self.X, axis=0)
preds, ground_truth = [], []
for _ in range(self.N_RANDOM_FEATURE_SETS):
x = np.random.uniform(amin, amax, self.n_features)
preds.append(self.pred_in_custom(x, export_data=True))
ground_truth.append(self.pred_in_py(x))
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertListEqual(preds, ground_truth)
def test_existing_features__binary_data__exported(self):
self.load_binary_data()
self._port_estimator(export_data=True)
preds, ground_truth = [], []
n = min(self.N_EXISTING_FEATURE_SETS, len(self.X))
for x in self.X[:n]:
preds.append(self.pred_in_custom(x, export_data=True))
ground_truth.append(self.pred_in_py(x))
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertListEqual(preds, ground_truth)
def test_existing_features__iris_data__exported(self):
self.load_iris_data()
self._port_estimator(export_data=True)
preds, ground_truth = [], []
n = min(self.N_EXISTING_FEATURE_SETS, len(self.X))
for x in self.X[:n]:
preds.append(self.pred_in_custom(x, export_data=True))
ground_truth.append(self.pred_in_py(x))
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertListEqual(preds, ground_truth)
def test_existing_features__digits_data__exported(self):
self.load_digits_data()
self._port_estimator(export_data=True)
preds, ground_truth = [], []
n = min(self.N_EXISTING_FEATURE_SETS, len(self.X))
for x in self.X[:n]:
preds.append(self.pred_in_custom(x, export_data=True))
ground_truth.append(self.pred_in_py(x))
self._clear_estimator()
# noinspection PyUnresolvedReferences
self.assertListEqual(preds, ground_truth)
| 41.02381
| 66
| 0.658445
| 444
| 3,446
| 4.759009
| 0.112613
| 0.093706
| 0.079508
| 0.090866
| 0.973971
| 0.973971
| 0.973971
| 0.973971
| 0.973971
| 0.973971
| 0
| 0.00266
| 0.236216
| 3,446
| 84
| 67
| 41.02381
| 0.800152
| 0.068775
| 0
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 1
| 0.088235
| false
| 0
| 0.014706
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
964a22244b7bc5b59810362af73106a524893924
| 55,569
|
py
|
Python
|
build/lib.linux-x86_64-3.7/midi/sequencer/sequencer_alsa.py
|
Mohammed-bjj/python-midi
|
45a44164b2e612e0733326a0fb64ef632b4295de
|
[
"MIT"
] | null | null | null |
build/lib.linux-x86_64-3.7/midi/sequencer/sequencer_alsa.py
|
Mohammed-bjj/python-midi
|
45a44164b2e612e0733326a0fb64ef632b4295de
|
[
"MIT"
] | null | null | null |
build/lib.linux-x86_64-3.7/midi/sequencer/sequencer_alsa.py
|
Mohammed-bjj/python-midi
|
45a44164b2e612e0733326a0fb64ef632b4295de
|
[
"MIT"
] | null | null | null |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.1
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _sequencer_alsa
else:
import _sequencer_alsa
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
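# The helpers above are standard SWIG 4 boilerplate: _swig_repr builds the
# proxy repr, and the *_nondynamic_* wrappers restrict attribute creation on
# wrapped classes. Everything below is a thin shim; each function simply
# forwards its arguments to the compiled _sequencer_alsa extension module.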
def open_client(name, type, stream, mode):
return _sequencer_alsa.open_client(name, type, stream, mode)
def new_port_subscribe():
return _sequencer_alsa.new_port_subscribe()
def new_queue_status(handle, queue):
return _sequencer_alsa.new_queue_status(handle, queue)
def free_queue_status(qstatus):
return _sequencer_alsa.free_queue_status(qstatus)
def new_port_info():
return _sequencer_alsa.new_port_info()
def new_client_info():
return _sequencer_alsa.new_client_info()
def event_input(handle):
return _sequencer_alsa.event_input(handle)
def snd_seq_control_queue_eventless(handle, queue, type, value):
return _sequencer_alsa.snd_seq_control_queue_eventless(handle, queue, type, value)
def init_queue_tempo(handle, queue, bpm, ppq):
return _sequencer_alsa.init_queue_tempo(handle, queue, bpm, ppq)
def client_poll_descriptors(handle):
return _sequencer_alsa.client_poll_descriptors(handle)
SND_SEQ_OPEN_OUTPUT = _sequencer_alsa.SND_SEQ_OPEN_OUTPUT
SND_SEQ_OPEN_INPUT = _sequencer_alsa.SND_SEQ_OPEN_INPUT
SND_SEQ_OPEN_DUPLEX = _sequencer_alsa.SND_SEQ_OPEN_DUPLEX
SND_SEQ_NONBLOCK = _sequencer_alsa.SND_SEQ_NONBLOCK
SND_SEQ_TYPE_HW = _sequencer_alsa.SND_SEQ_TYPE_HW
SND_SEQ_TYPE_SHM = _sequencer_alsa.SND_SEQ_TYPE_SHM
SND_SEQ_TYPE_INET = _sequencer_alsa.SND_SEQ_TYPE_INET
SND_SEQ_ADDRESS_UNKNOWN = _sequencer_alsa.SND_SEQ_ADDRESS_UNKNOWN
SND_SEQ_ADDRESS_SUBSCRIBERS = _sequencer_alsa.SND_SEQ_ADDRESS_SUBSCRIBERS
SND_SEQ_ADDRESS_BROADCAST = _sequencer_alsa.SND_SEQ_ADDRESS_BROADCAST
SND_SEQ_CLIENT_SYSTEM = _sequencer_alsa.SND_SEQ_CLIENT_SYSTEM
def snd_seq_open(handle, name, streams, mode):
return _sequencer_alsa.snd_seq_open(handle, name, streams, mode)
def snd_seq_open_lconf(handle, name, streams, mode, lconf):
return _sequencer_alsa.snd_seq_open_lconf(handle, name, streams, mode, lconf)
def snd_seq_name(seq):
return _sequencer_alsa.snd_seq_name(seq)
def snd_seq_type(seq):
return _sequencer_alsa.snd_seq_type(seq)
def snd_seq_close(handle):
return _sequencer_alsa.snd_seq_close(handle)
def snd_seq_poll_descriptors_count(handle, events):
return _sequencer_alsa.snd_seq_poll_descriptors_count(handle, events)
def snd_seq_poll_descriptors(handle, pfds, space, events):
return _sequencer_alsa.snd_seq_poll_descriptors(handle, pfds, space, events)
def snd_seq_poll_descriptors_revents(seq, pfds, nfds, revents):
return _sequencer_alsa.snd_seq_poll_descriptors_revents(seq, pfds, nfds, revents)
def snd_seq_nonblock(handle, nonblock):
return _sequencer_alsa.snd_seq_nonblock(handle, nonblock)
def snd_seq_client_id(handle):
return _sequencer_alsa.snd_seq_client_id(handle)
def snd_seq_get_output_buffer_size(handle):
return _sequencer_alsa.snd_seq_get_output_buffer_size(handle)
def snd_seq_get_input_buffer_size(handle):
return _sequencer_alsa.snd_seq_get_input_buffer_size(handle)
def snd_seq_set_output_buffer_size(handle, size):
return _sequencer_alsa.snd_seq_set_output_buffer_size(handle, size)
def snd_seq_set_input_buffer_size(handle, size):
return _sequencer_alsa.snd_seq_set_input_buffer_size(handle, size)
def snd_seq_system_info_sizeof():
return _sequencer_alsa.snd_seq_system_info_sizeof()
def snd_seq_system_info_malloc(ptr):
return _sequencer_alsa.snd_seq_system_info_malloc(ptr)
def snd_seq_system_info_free(ptr):
return _sequencer_alsa.snd_seq_system_info_free(ptr)
def snd_seq_system_info_copy(dst, src):
return _sequencer_alsa.snd_seq_system_info_copy(dst, src)
def snd_seq_system_info_get_queues(info):
return _sequencer_alsa.snd_seq_system_info_get_queues(info)
def snd_seq_system_info_get_clients(info):
return _sequencer_alsa.snd_seq_system_info_get_clients(info)
def snd_seq_system_info_get_ports(info):
return _sequencer_alsa.snd_seq_system_info_get_ports(info)
def snd_seq_system_info_get_channels(info):
return _sequencer_alsa.snd_seq_system_info_get_channels(info)
def snd_seq_system_info_get_cur_clients(info):
return _sequencer_alsa.snd_seq_system_info_get_cur_clients(info)
def snd_seq_system_info_get_cur_queues(info):
return _sequencer_alsa.snd_seq_system_info_get_cur_queues(info)
def snd_seq_system_info(handle, info):
return _sequencer_alsa.snd_seq_system_info(handle, info)
SND_SEQ_USER_CLIENT = _sequencer_alsa.SND_SEQ_USER_CLIENT
SND_SEQ_KERNEL_CLIENT = _sequencer_alsa.SND_SEQ_KERNEL_CLIENT
def snd_seq_client_info_sizeof():
return _sequencer_alsa.snd_seq_client_info_sizeof()
def snd_seq_client_info_malloc(ptr):
return _sequencer_alsa.snd_seq_client_info_malloc(ptr)
def snd_seq_client_info_free(ptr):
return _sequencer_alsa.snd_seq_client_info_free(ptr)
def snd_seq_client_info_copy(dst, src):
return _sequencer_alsa.snd_seq_client_info_copy(dst, src)
def snd_seq_client_info_get_client(info):
return _sequencer_alsa.snd_seq_client_info_get_client(info)
def snd_seq_client_info_get_type(info):
return _sequencer_alsa.snd_seq_client_info_get_type(info)
def snd_seq_client_info_get_name(info):
return _sequencer_alsa.snd_seq_client_info_get_name(info)
def snd_seq_client_info_get_broadcast_filter(info):
return _sequencer_alsa.snd_seq_client_info_get_broadcast_filter(info)
def snd_seq_client_info_get_error_bounce(info):
return _sequencer_alsa.snd_seq_client_info_get_error_bounce(info)
def snd_seq_client_info_get_event_filter(info):
return _sequencer_alsa.snd_seq_client_info_get_event_filter(info)
def snd_seq_client_info_get_num_ports(info):
return _sequencer_alsa.snd_seq_client_info_get_num_ports(info)
def snd_seq_client_info_get_event_lost(info):
return _sequencer_alsa.snd_seq_client_info_get_event_lost(info)
def snd_seq_client_info_set_client(info, client):
return _sequencer_alsa.snd_seq_client_info_set_client(info, client)
def snd_seq_client_info_set_name(info, name):
return _sequencer_alsa.snd_seq_client_info_set_name(info, name)
def snd_seq_client_info_set_broadcast_filter(info, val):
return _sequencer_alsa.snd_seq_client_info_set_broadcast_filter(info, val)
def snd_seq_client_info_set_error_bounce(info, val):
return _sequencer_alsa.snd_seq_client_info_set_error_bounce(info, val)
def snd_seq_client_info_set_event_filter(info, filter):
return _sequencer_alsa.snd_seq_client_info_set_event_filter(info, filter)
def snd_seq_client_info_event_filter_clear(info):
return _sequencer_alsa.snd_seq_client_info_event_filter_clear(info)
def snd_seq_client_info_event_filter_add(info, event_type):
return _sequencer_alsa.snd_seq_client_info_event_filter_add(info, event_type)
def snd_seq_client_info_event_filter_del(info, event_type):
return _sequencer_alsa.snd_seq_client_info_event_filter_del(info, event_type)
def snd_seq_client_info_event_filter_check(info, event_type):
return _sequencer_alsa.snd_seq_client_info_event_filter_check(info, event_type)
def snd_seq_get_client_info(handle, info):
return _sequencer_alsa.snd_seq_get_client_info(handle, info)
def snd_seq_get_any_client_info(handle, client, info):
return _sequencer_alsa.snd_seq_get_any_client_info(handle, client, info)
def snd_seq_set_client_info(handle, info):
return _sequencer_alsa.snd_seq_set_client_info(handle, info)
def snd_seq_query_next_client(handle, info):
return _sequencer_alsa.snd_seq_query_next_client(handle, info)
def snd_seq_client_pool_sizeof():
return _sequencer_alsa.snd_seq_client_pool_sizeof()
def snd_seq_client_pool_malloc(ptr):
return _sequencer_alsa.snd_seq_client_pool_malloc(ptr)
def snd_seq_client_pool_free(ptr):
return _sequencer_alsa.snd_seq_client_pool_free(ptr)
def snd_seq_client_pool_copy(dst, src):
return _sequencer_alsa.snd_seq_client_pool_copy(dst, src)
def snd_seq_client_pool_get_client(info):
return _sequencer_alsa.snd_seq_client_pool_get_client(info)
def snd_seq_client_pool_get_output_pool(info):
return _sequencer_alsa.snd_seq_client_pool_get_output_pool(info)
def snd_seq_client_pool_get_input_pool(info):
return _sequencer_alsa.snd_seq_client_pool_get_input_pool(info)
def snd_seq_client_pool_get_output_room(info):
return _sequencer_alsa.snd_seq_client_pool_get_output_room(info)
def snd_seq_client_pool_get_output_free(info):
return _sequencer_alsa.snd_seq_client_pool_get_output_free(info)
def snd_seq_client_pool_get_input_free(info):
return _sequencer_alsa.snd_seq_client_pool_get_input_free(info)
def snd_seq_client_pool_set_output_pool(info, size):
return _sequencer_alsa.snd_seq_client_pool_set_output_pool(info, size)
def snd_seq_client_pool_set_input_pool(info, size):
return _sequencer_alsa.snd_seq_client_pool_set_input_pool(info, size)
def snd_seq_client_pool_set_output_room(info, size):
return _sequencer_alsa.snd_seq_client_pool_set_output_room(info, size)
def snd_seq_get_client_pool(handle, info):
return _sequencer_alsa.snd_seq_get_client_pool(handle, info)
def snd_seq_set_client_pool(handle, info):
return _sequencer_alsa.snd_seq_set_client_pool(handle, info)
SND_SEQ_PORT_SYSTEM_TIMER = _sequencer_alsa.SND_SEQ_PORT_SYSTEM_TIMER
SND_SEQ_PORT_SYSTEM_ANNOUNCE = _sequencer_alsa.SND_SEQ_PORT_SYSTEM_ANNOUNCE
SND_SEQ_PORT_CAP_READ = _sequencer_alsa.SND_SEQ_PORT_CAP_READ
SND_SEQ_PORT_CAP_WRITE = _sequencer_alsa.SND_SEQ_PORT_CAP_WRITE
SND_SEQ_PORT_CAP_SYNC_READ = _sequencer_alsa.SND_SEQ_PORT_CAP_SYNC_READ
SND_SEQ_PORT_CAP_SYNC_WRITE = _sequencer_alsa.SND_SEQ_PORT_CAP_SYNC_WRITE
SND_SEQ_PORT_CAP_DUPLEX = _sequencer_alsa.SND_SEQ_PORT_CAP_DUPLEX
SND_SEQ_PORT_CAP_SUBS_READ = _sequencer_alsa.SND_SEQ_PORT_CAP_SUBS_READ
SND_SEQ_PORT_CAP_SUBS_WRITE = _sequencer_alsa.SND_SEQ_PORT_CAP_SUBS_WRITE
SND_SEQ_PORT_CAP_NO_EXPORT = _sequencer_alsa.SND_SEQ_PORT_CAP_NO_EXPORT
SND_SEQ_PORT_TYPE_SPECIFIC = _sequencer_alsa.SND_SEQ_PORT_TYPE_SPECIFIC
SND_SEQ_PORT_TYPE_MIDI_GENERIC = _sequencer_alsa.SND_SEQ_PORT_TYPE_MIDI_GENERIC
SND_SEQ_PORT_TYPE_MIDI_GM = _sequencer_alsa.SND_SEQ_PORT_TYPE_MIDI_GM
SND_SEQ_PORT_TYPE_MIDI_GS = _sequencer_alsa.SND_SEQ_PORT_TYPE_MIDI_GS
SND_SEQ_PORT_TYPE_MIDI_XG = _sequencer_alsa.SND_SEQ_PORT_TYPE_MIDI_XG
SND_SEQ_PORT_TYPE_MIDI_MT32 = _sequencer_alsa.SND_SEQ_PORT_TYPE_MIDI_MT32
SND_SEQ_PORT_TYPE_MIDI_GM2 = _sequencer_alsa.SND_SEQ_PORT_TYPE_MIDI_GM2
SND_SEQ_PORT_TYPE_SYNTH = _sequencer_alsa.SND_SEQ_PORT_TYPE_SYNTH
SND_SEQ_PORT_TYPE_DIRECT_SAMPLE = _sequencer_alsa.SND_SEQ_PORT_TYPE_DIRECT_SAMPLE
SND_SEQ_PORT_TYPE_SAMPLE = _sequencer_alsa.SND_SEQ_PORT_TYPE_SAMPLE
SND_SEQ_PORT_TYPE_HARDWARE = _sequencer_alsa.SND_SEQ_PORT_TYPE_HARDWARE
SND_SEQ_PORT_TYPE_SOFTWARE = _sequencer_alsa.SND_SEQ_PORT_TYPE_SOFTWARE
SND_SEQ_PORT_TYPE_SYNTHESIZER = _sequencer_alsa.SND_SEQ_PORT_TYPE_SYNTHESIZER
SND_SEQ_PORT_TYPE_PORT = _sequencer_alsa.SND_SEQ_PORT_TYPE_PORT
SND_SEQ_PORT_TYPE_APPLICATION = _sequencer_alsa.SND_SEQ_PORT_TYPE_APPLICATION
def snd_seq_port_info_sizeof():
return _sequencer_alsa.snd_seq_port_info_sizeof()
def snd_seq_port_info_malloc(ptr):
return _sequencer_alsa.snd_seq_port_info_malloc(ptr)
def snd_seq_port_info_free(ptr):
return _sequencer_alsa.snd_seq_port_info_free(ptr)
def snd_seq_port_info_copy(dst, src):
return _sequencer_alsa.snd_seq_port_info_copy(dst, src)
def snd_seq_port_info_get_client(info):
return _sequencer_alsa.snd_seq_port_info_get_client(info)
def snd_seq_port_info_get_port(info):
return _sequencer_alsa.snd_seq_port_info_get_port(info)
def snd_seq_port_info_get_addr(info):
return _sequencer_alsa.snd_seq_port_info_get_addr(info)
def snd_seq_port_info_get_name(info):
return _sequencer_alsa.snd_seq_port_info_get_name(info)
def snd_seq_port_info_get_capability(info):
return _sequencer_alsa.snd_seq_port_info_get_capability(info)
def snd_seq_port_info_get_type(info):
return _sequencer_alsa.snd_seq_port_info_get_type(info)
def snd_seq_port_info_get_midi_channels(info):
return _sequencer_alsa.snd_seq_port_info_get_midi_channels(info)
def snd_seq_port_info_get_midi_voices(info):
return _sequencer_alsa.snd_seq_port_info_get_midi_voices(info)
def snd_seq_port_info_get_synth_voices(info):
return _sequencer_alsa.snd_seq_port_info_get_synth_voices(info)
def snd_seq_port_info_get_read_use(info):
return _sequencer_alsa.snd_seq_port_info_get_read_use(info)
def snd_seq_port_info_get_write_use(info):
return _sequencer_alsa.snd_seq_port_info_get_write_use(info)
def snd_seq_port_info_get_port_specified(info):
return _sequencer_alsa.snd_seq_port_info_get_port_specified(info)
def snd_seq_port_info_get_timestamping(info):
return _sequencer_alsa.snd_seq_port_info_get_timestamping(info)
def snd_seq_port_info_get_timestamp_real(info):
return _sequencer_alsa.snd_seq_port_info_get_timestamp_real(info)
def snd_seq_port_info_get_timestamp_queue(info):
return _sequencer_alsa.snd_seq_port_info_get_timestamp_queue(info)
def snd_seq_port_info_set_client(info, client):
return _sequencer_alsa.snd_seq_port_info_set_client(info, client)
def snd_seq_port_info_set_port(info, port):
return _sequencer_alsa.snd_seq_port_info_set_port(info, port)
def snd_seq_port_info_set_addr(info, addr):
return _sequencer_alsa.snd_seq_port_info_set_addr(info, addr)
def snd_seq_port_info_set_name(info, name):
return _sequencer_alsa.snd_seq_port_info_set_name(info, name)
def snd_seq_port_info_set_capability(info, capability):
return _sequencer_alsa.snd_seq_port_info_set_capability(info, capability)
def snd_seq_port_info_set_type(info, type):
return _sequencer_alsa.snd_seq_port_info_set_type(info, type)
def snd_seq_port_info_set_midi_channels(info, channels):
return _sequencer_alsa.snd_seq_port_info_set_midi_channels(info, channels)
def snd_seq_port_info_set_midi_voices(info, voices):
return _sequencer_alsa.snd_seq_port_info_set_midi_voices(info, voices)
def snd_seq_port_info_set_synth_voices(info, voices):
return _sequencer_alsa.snd_seq_port_info_set_synth_voices(info, voices)
def snd_seq_port_info_set_port_specified(info, val):
return _sequencer_alsa.snd_seq_port_info_set_port_specified(info, val)
def snd_seq_port_info_set_timestamping(info, enable):
return _sequencer_alsa.snd_seq_port_info_set_timestamping(info, enable)
def snd_seq_port_info_set_timestamp_real(info, realtime):
return _sequencer_alsa.snd_seq_port_info_set_timestamp_real(info, realtime)
def snd_seq_port_info_set_timestamp_queue(info, queue):
return _sequencer_alsa.snd_seq_port_info_set_timestamp_queue(info, queue)
def snd_seq_create_port(handle, info):
return _sequencer_alsa.snd_seq_create_port(handle, info)
def snd_seq_delete_port(handle, port):
return _sequencer_alsa.snd_seq_delete_port(handle, port)
def snd_seq_get_port_info(handle, port, info):
return _sequencer_alsa.snd_seq_get_port_info(handle, port, info)
def snd_seq_get_any_port_info(handle, client, port, info):
return _sequencer_alsa.snd_seq_get_any_port_info(handle, client, port, info)
def snd_seq_set_port_info(handle, port, info):
return _sequencer_alsa.snd_seq_set_port_info(handle, port, info)
def snd_seq_query_next_port(handle, info):
return _sequencer_alsa.snd_seq_query_next_port(handle, info)
def snd_seq_port_subscribe_sizeof():
return _sequencer_alsa.snd_seq_port_subscribe_sizeof()
def snd_seq_port_subscribe_malloc(ptr):
return _sequencer_alsa.snd_seq_port_subscribe_malloc(ptr)
def snd_seq_port_subscribe_free(ptr):
return _sequencer_alsa.snd_seq_port_subscribe_free(ptr)
def snd_seq_port_subscribe_copy(dst, src):
return _sequencer_alsa.snd_seq_port_subscribe_copy(dst, src)
def snd_seq_port_subscribe_get_sender(info):
return _sequencer_alsa.snd_seq_port_subscribe_get_sender(info)
def snd_seq_port_subscribe_get_dest(info):
return _sequencer_alsa.snd_seq_port_subscribe_get_dest(info)
def snd_seq_port_subscribe_get_queue(info):
return _sequencer_alsa.snd_seq_port_subscribe_get_queue(info)
def snd_seq_port_subscribe_get_exclusive(info):
return _sequencer_alsa.snd_seq_port_subscribe_get_exclusive(info)
def snd_seq_port_subscribe_get_time_update(info):
return _sequencer_alsa.snd_seq_port_subscribe_get_time_update(info)
def snd_seq_port_subscribe_get_time_real(info):
return _sequencer_alsa.snd_seq_port_subscribe_get_time_real(info)
def snd_seq_port_subscribe_set_sender(info, addr):
return _sequencer_alsa.snd_seq_port_subscribe_set_sender(info, addr)
def snd_seq_port_subscribe_set_dest(info, addr):
return _sequencer_alsa.snd_seq_port_subscribe_set_dest(info, addr)
def snd_seq_port_subscribe_set_queue(info, q):
return _sequencer_alsa.snd_seq_port_subscribe_set_queue(info, q)
def snd_seq_port_subscribe_set_exclusive(info, val):
return _sequencer_alsa.snd_seq_port_subscribe_set_exclusive(info, val)
def snd_seq_port_subscribe_set_time_update(info, val):
return _sequencer_alsa.snd_seq_port_subscribe_set_time_update(info, val)
def snd_seq_port_subscribe_set_time_real(info, val):
return _sequencer_alsa.snd_seq_port_subscribe_set_time_real(info, val)
def snd_seq_get_port_subscription(handle, sub):
return _sequencer_alsa.snd_seq_get_port_subscription(handle, sub)
def snd_seq_subscribe_port(handle, sub):
return _sequencer_alsa.snd_seq_subscribe_port(handle, sub)
def snd_seq_unsubscribe_port(handle, sub):
return _sequencer_alsa.snd_seq_unsubscribe_port(handle, sub)
SND_SEQ_QUERY_SUBS_READ = _sequencer_alsa.SND_SEQ_QUERY_SUBS_READ
SND_SEQ_QUERY_SUBS_WRITE = _sequencer_alsa.SND_SEQ_QUERY_SUBS_WRITE
def snd_seq_query_subscribe_sizeof():
return _sequencer_alsa.snd_seq_query_subscribe_sizeof()
def snd_seq_query_subscribe_malloc(ptr):
return _sequencer_alsa.snd_seq_query_subscribe_malloc(ptr)
def snd_seq_query_subscribe_free(ptr):
return _sequencer_alsa.snd_seq_query_subscribe_free(ptr)
def snd_seq_query_subscribe_copy(dst, src):
return _sequencer_alsa.snd_seq_query_subscribe_copy(dst, src)
def snd_seq_query_subscribe_get_client(info):
return _sequencer_alsa.snd_seq_query_subscribe_get_client(info)
def snd_seq_query_subscribe_get_port(info):
return _sequencer_alsa.snd_seq_query_subscribe_get_port(info)
def snd_seq_query_subscribe_get_root(info):
return _sequencer_alsa.snd_seq_query_subscribe_get_root(info)
def snd_seq_query_subscribe_get_type(info):
return _sequencer_alsa.snd_seq_query_subscribe_get_type(info)
def snd_seq_query_subscribe_get_index(info):
return _sequencer_alsa.snd_seq_query_subscribe_get_index(info)
def snd_seq_query_subscribe_get_num_subs(info):
return _sequencer_alsa.snd_seq_query_subscribe_get_num_subs(info)
def snd_seq_query_subscribe_get_addr(info):
return _sequencer_alsa.snd_seq_query_subscribe_get_addr(info)
def snd_seq_query_subscribe_get_queue(info):
return _sequencer_alsa.snd_seq_query_subscribe_get_queue(info)
def snd_seq_query_subscribe_get_exclusive(info):
return _sequencer_alsa.snd_seq_query_subscribe_get_exclusive(info)
def snd_seq_query_subscribe_get_time_update(info):
return _sequencer_alsa.snd_seq_query_subscribe_get_time_update(info)
def snd_seq_query_subscribe_get_time_real(info):
return _sequencer_alsa.snd_seq_query_subscribe_get_time_real(info)
def snd_seq_query_subscribe_set_client(info, client):
return _sequencer_alsa.snd_seq_query_subscribe_set_client(info, client)
def snd_seq_query_subscribe_set_port(info, port):
return _sequencer_alsa.snd_seq_query_subscribe_set_port(info, port)
def snd_seq_query_subscribe_set_root(info, addr):
return _sequencer_alsa.snd_seq_query_subscribe_set_root(info, addr)
def snd_seq_query_subscribe_set_type(info, type):
return _sequencer_alsa.snd_seq_query_subscribe_set_type(info, type)
def snd_seq_query_subscribe_set_index(info, _index):
return _sequencer_alsa.snd_seq_query_subscribe_set_index(info, _index)
def snd_seq_query_port_subscribers(seq, subs):
return _sequencer_alsa.snd_seq_query_port_subscribers(seq, subs)
SND_SEQ_QUEUE_DIRECT = _sequencer_alsa.SND_SEQ_QUEUE_DIRECT
def snd_seq_queue_info_sizeof():
return _sequencer_alsa.snd_seq_queue_info_sizeof()
def snd_seq_queue_info_malloc(ptr):
return _sequencer_alsa.snd_seq_queue_info_malloc(ptr)
def snd_seq_queue_info_free(ptr):
return _sequencer_alsa.snd_seq_queue_info_free(ptr)
def snd_seq_queue_info_copy(dst, src):
return _sequencer_alsa.snd_seq_queue_info_copy(dst, src)
def snd_seq_queue_info_get_queue(info):
return _sequencer_alsa.snd_seq_queue_info_get_queue(info)
def snd_seq_queue_info_get_name(info):
return _sequencer_alsa.snd_seq_queue_info_get_name(info)
def snd_seq_queue_info_get_owner(info):
return _sequencer_alsa.snd_seq_queue_info_get_owner(info)
def snd_seq_queue_info_get_locked(info):
return _sequencer_alsa.snd_seq_queue_info_get_locked(info)
def snd_seq_queue_info_get_flags(info):
return _sequencer_alsa.snd_seq_queue_info_get_flags(info)
def snd_seq_queue_info_set_name(info, name):
return _sequencer_alsa.snd_seq_queue_info_set_name(info, name)
def snd_seq_queue_info_set_owner(info, owner):
return _sequencer_alsa.snd_seq_queue_info_set_owner(info, owner)
def snd_seq_queue_info_set_locked(info, locked):
return _sequencer_alsa.snd_seq_queue_info_set_locked(info, locked)
def snd_seq_queue_info_set_flags(info, flags):
return _sequencer_alsa.snd_seq_queue_info_set_flags(info, flags)
def snd_seq_create_queue(seq, info):
return _sequencer_alsa.snd_seq_create_queue(seq, info)
def snd_seq_alloc_named_queue(seq, name):
return _sequencer_alsa.snd_seq_alloc_named_queue(seq, name)
def snd_seq_alloc_queue(handle):
return _sequencer_alsa.snd_seq_alloc_queue(handle)
def snd_seq_free_queue(handle, q):
return _sequencer_alsa.snd_seq_free_queue(handle, q)
def snd_seq_get_queue_info(seq, q, info):
return _sequencer_alsa.snd_seq_get_queue_info(seq, q, info)
def snd_seq_set_queue_info(seq, q, info):
return _sequencer_alsa.snd_seq_set_queue_info(seq, q, info)
def snd_seq_query_named_queue(seq, name):
return _sequencer_alsa.snd_seq_query_named_queue(seq, name)
def snd_seq_get_queue_usage(handle, q):
return _sequencer_alsa.snd_seq_get_queue_usage(handle, q)
def snd_seq_set_queue_usage(handle, q, used):
return _sequencer_alsa.snd_seq_set_queue_usage(handle, q, used)
def snd_seq_queue_status_sizeof():
return _sequencer_alsa.snd_seq_queue_status_sizeof()
def snd_seq_queue_status_malloc(ptr):
return _sequencer_alsa.snd_seq_queue_status_malloc(ptr)
def snd_seq_queue_status_free(ptr):
return _sequencer_alsa.snd_seq_queue_status_free(ptr)
def snd_seq_queue_status_copy(dst, src):
return _sequencer_alsa.snd_seq_queue_status_copy(dst, src)
def snd_seq_queue_status_get_queue(info):
return _sequencer_alsa.snd_seq_queue_status_get_queue(info)
def snd_seq_queue_status_get_events(info):
return _sequencer_alsa.snd_seq_queue_status_get_events(info)
def snd_seq_queue_status_get_tick_time(info):
return _sequencer_alsa.snd_seq_queue_status_get_tick_time(info)
def snd_seq_queue_status_get_real_time(info):
return _sequencer_alsa.snd_seq_queue_status_get_real_time(info)
def snd_seq_queue_status_get_status(info):
return _sequencer_alsa.snd_seq_queue_status_get_status(info)
def snd_seq_get_queue_status(handle, q, status):
return _sequencer_alsa.snd_seq_get_queue_status(handle, q, status)
def snd_seq_queue_tempo_sizeof():
return _sequencer_alsa.snd_seq_queue_tempo_sizeof()
def snd_seq_queue_tempo_malloc(ptr):
return _sequencer_alsa.snd_seq_queue_tempo_malloc(ptr)
def snd_seq_queue_tempo_free(ptr):
return _sequencer_alsa.snd_seq_queue_tempo_free(ptr)
def snd_seq_queue_tempo_copy(dst, src):
return _sequencer_alsa.snd_seq_queue_tempo_copy(dst, src)
def snd_seq_queue_tempo_get_queue(info):
return _sequencer_alsa.snd_seq_queue_tempo_get_queue(info)
def snd_seq_queue_tempo_get_tempo(info):
return _sequencer_alsa.snd_seq_queue_tempo_get_tempo(info)
def snd_seq_queue_tempo_get_ppq(info):
return _sequencer_alsa.snd_seq_queue_tempo_get_ppq(info)
def snd_seq_queue_tempo_get_skew(info):
return _sequencer_alsa.snd_seq_queue_tempo_get_skew(info)
def snd_seq_queue_tempo_get_skew_base(info):
return _sequencer_alsa.snd_seq_queue_tempo_get_skew_base(info)
def snd_seq_queue_tempo_set_tempo(info, tempo):
return _sequencer_alsa.snd_seq_queue_tempo_set_tempo(info, tempo)
def snd_seq_queue_tempo_set_ppq(info, ppq):
return _sequencer_alsa.snd_seq_queue_tempo_set_ppq(info, ppq)
def snd_seq_queue_tempo_set_skew(info, skew):
return _sequencer_alsa.snd_seq_queue_tempo_set_skew(info, skew)
def snd_seq_queue_tempo_set_skew_base(info, base):
return _sequencer_alsa.snd_seq_queue_tempo_set_skew_base(info, base)
def snd_seq_get_queue_tempo(handle, q, tempo):
return _sequencer_alsa.snd_seq_get_queue_tempo(handle, q, tempo)
def snd_seq_set_queue_tempo(handle, q, tempo):
return _sequencer_alsa.snd_seq_set_queue_tempo(handle, q, tempo)
SND_SEQ_TIMER_ALSA = _sequencer_alsa.SND_SEQ_TIMER_ALSA
SND_SEQ_TIMER_MIDI_CLOCK = _sequencer_alsa.SND_SEQ_TIMER_MIDI_CLOCK
SND_SEQ_TIMER_MIDI_TICK = _sequencer_alsa.SND_SEQ_TIMER_MIDI_TICK
def snd_seq_queue_timer_sizeof():
return _sequencer_alsa.snd_seq_queue_timer_sizeof()
def snd_seq_queue_timer_malloc(ptr):
return _sequencer_alsa.snd_seq_queue_timer_malloc(ptr)
def snd_seq_queue_timer_free(ptr):
return _sequencer_alsa.snd_seq_queue_timer_free(ptr)
def snd_seq_queue_timer_copy(dst, src):
return _sequencer_alsa.snd_seq_queue_timer_copy(dst, src)
def snd_seq_queue_timer_get_queue(info):
return _sequencer_alsa.snd_seq_queue_timer_get_queue(info)
def snd_seq_queue_timer_get_type(info):
return _sequencer_alsa.snd_seq_queue_timer_get_type(info)
def snd_seq_queue_timer_get_id(info):
return _sequencer_alsa.snd_seq_queue_timer_get_id(info)
def snd_seq_queue_timer_get_resolution(info):
return _sequencer_alsa.snd_seq_queue_timer_get_resolution(info)
def snd_seq_queue_timer_set_type(info, type):
return _sequencer_alsa.snd_seq_queue_timer_set_type(info, type)
def snd_seq_queue_timer_set_id(info, id):
return _sequencer_alsa.snd_seq_queue_timer_set_id(info, id)
def snd_seq_queue_timer_set_resolution(info, resolution):
return _sequencer_alsa.snd_seq_queue_timer_set_resolution(info, resolution)
def snd_seq_get_queue_timer(handle, q, timer):
return _sequencer_alsa.snd_seq_get_queue_timer(handle, q, timer)
def snd_seq_set_queue_timer(handle, q, timer):
return _sequencer_alsa.snd_seq_set_queue_timer(handle, q, timer)
def snd_seq_free_event(ev):
return _sequencer_alsa.snd_seq_free_event(ev)
def snd_seq_event_length(ev):
return _sequencer_alsa.snd_seq_event_length(ev)
def snd_seq_event_output(handle, ev):
return _sequencer_alsa.snd_seq_event_output(handle, ev)
def snd_seq_event_output_buffer(handle, ev):
return _sequencer_alsa.snd_seq_event_output_buffer(handle, ev)
def snd_seq_event_output_direct(handle, ev):
return _sequencer_alsa.snd_seq_event_output_direct(handle, ev)
def snd_seq_event_input(handle, ev):
return _sequencer_alsa.snd_seq_event_input(handle, ev)
def snd_seq_event_input_pending(seq, fetch_sequencer):
return _sequencer_alsa.snd_seq_event_input_pending(seq, fetch_sequencer)
def snd_seq_drain_output(handle):
return _sequencer_alsa.snd_seq_drain_output(handle)
def snd_seq_event_output_pending(seq):
return _sequencer_alsa.snd_seq_event_output_pending(seq)
def snd_seq_extract_output(handle, ev):
return _sequencer_alsa.snd_seq_extract_output(handle, ev)
def snd_seq_drop_output(handle):
return _sequencer_alsa.snd_seq_drop_output(handle)
def snd_seq_drop_output_buffer(handle):
return _sequencer_alsa.snd_seq_drop_output_buffer(handle)
def snd_seq_drop_input(handle):
return _sequencer_alsa.snd_seq_drop_input(handle)
def snd_seq_drop_input_buffer(handle):
return _sequencer_alsa.snd_seq_drop_input_buffer(handle)
SND_SEQ_REMOVE_INPUT = _sequencer_alsa.SND_SEQ_REMOVE_INPUT
SND_SEQ_REMOVE_OUTPUT = _sequencer_alsa.SND_SEQ_REMOVE_OUTPUT
SND_SEQ_REMOVE_DEST = _sequencer_alsa.SND_SEQ_REMOVE_DEST
SND_SEQ_REMOVE_DEST_CHANNEL = _sequencer_alsa.SND_SEQ_REMOVE_DEST_CHANNEL
SND_SEQ_REMOVE_TIME_BEFORE = _sequencer_alsa.SND_SEQ_REMOVE_TIME_BEFORE
SND_SEQ_REMOVE_TIME_AFTER = _sequencer_alsa.SND_SEQ_REMOVE_TIME_AFTER
SND_SEQ_REMOVE_TIME_TICK = _sequencer_alsa.SND_SEQ_REMOVE_TIME_TICK
SND_SEQ_REMOVE_EVENT_TYPE = _sequencer_alsa.SND_SEQ_REMOVE_EVENT_TYPE
SND_SEQ_REMOVE_IGNORE_OFF = _sequencer_alsa.SND_SEQ_REMOVE_IGNORE_OFF
SND_SEQ_REMOVE_TAG_MATCH = _sequencer_alsa.SND_SEQ_REMOVE_TAG_MATCH
def snd_seq_remove_events_sizeof():
return _sequencer_alsa.snd_seq_remove_events_sizeof()
def snd_seq_remove_events_malloc(ptr):
return _sequencer_alsa.snd_seq_remove_events_malloc(ptr)
def snd_seq_remove_events_free(ptr):
return _sequencer_alsa.snd_seq_remove_events_free(ptr)
def snd_seq_remove_events_copy(dst, src):
return _sequencer_alsa.snd_seq_remove_events_copy(dst, src)
def snd_seq_remove_events_get_condition(info):
return _sequencer_alsa.snd_seq_remove_events_get_condition(info)
def snd_seq_remove_events_get_queue(info):
return _sequencer_alsa.snd_seq_remove_events_get_queue(info)
def snd_seq_remove_events_get_time(info):
return _sequencer_alsa.snd_seq_remove_events_get_time(info)
def snd_seq_remove_events_get_dest(info):
return _sequencer_alsa.snd_seq_remove_events_get_dest(info)
def snd_seq_remove_events_get_channel(info):
return _sequencer_alsa.snd_seq_remove_events_get_channel(info)
def snd_seq_remove_events_get_event_type(info):
return _sequencer_alsa.snd_seq_remove_events_get_event_type(info)
def snd_seq_remove_events_get_tag(info):
return _sequencer_alsa.snd_seq_remove_events_get_tag(info)
def snd_seq_remove_events_set_condition(info, flags):
return _sequencer_alsa.snd_seq_remove_events_set_condition(info, flags)
def snd_seq_remove_events_set_queue(info, queue):
return _sequencer_alsa.snd_seq_remove_events_set_queue(info, queue)
def snd_seq_remove_events_set_time(info, time):
return _sequencer_alsa.snd_seq_remove_events_set_time(info, time)
def snd_seq_remove_events_set_dest(info, addr):
return _sequencer_alsa.snd_seq_remove_events_set_dest(info, addr)
def snd_seq_remove_events_set_channel(info, channel):
return _sequencer_alsa.snd_seq_remove_events_set_channel(info, channel)
def snd_seq_remove_events_set_event_type(info, type):
return _sequencer_alsa.snd_seq_remove_events_set_event_type(info, type)
def snd_seq_remove_events_set_tag(info, tag):
return _sequencer_alsa.snd_seq_remove_events_set_tag(info, tag)
def snd_seq_remove_events(handle, info):
return _sequencer_alsa.snd_seq_remove_events(handle, info)
def snd_seq_set_bit(nr, array):
return _sequencer_alsa.snd_seq_set_bit(nr, array)
def snd_seq_unset_bit(nr, array):
return _sequencer_alsa.snd_seq_unset_bit(nr, array)
def snd_seq_change_bit(nr, array):
return _sequencer_alsa.snd_seq_change_bit(nr, array)
def snd_seq_get_bit(nr, array):
return _sequencer_alsa.snd_seq_get_bit(nr, array)
SND_SEQ_EVFLG_RESULT = _sequencer_alsa.SND_SEQ_EVFLG_RESULT
SND_SEQ_EVFLG_NOTE = _sequencer_alsa.SND_SEQ_EVFLG_NOTE
SND_SEQ_EVFLG_CONTROL = _sequencer_alsa.SND_SEQ_EVFLG_CONTROL
SND_SEQ_EVFLG_QUEUE = _sequencer_alsa.SND_SEQ_EVFLG_QUEUE
SND_SEQ_EVFLG_SYSTEM = _sequencer_alsa.SND_SEQ_EVFLG_SYSTEM
SND_SEQ_EVFLG_MESSAGE = _sequencer_alsa.SND_SEQ_EVFLG_MESSAGE
SND_SEQ_EVFLG_CONNECTION = _sequencer_alsa.SND_SEQ_EVFLG_CONNECTION
SND_SEQ_EVFLG_SAMPLE = _sequencer_alsa.SND_SEQ_EVFLG_SAMPLE
SND_SEQ_EVFLG_USERS = _sequencer_alsa.SND_SEQ_EVFLG_USERS
SND_SEQ_EVFLG_INSTR = _sequencer_alsa.SND_SEQ_EVFLG_INSTR
SND_SEQ_EVFLG_QUOTE = _sequencer_alsa.SND_SEQ_EVFLG_QUOTE
SND_SEQ_EVFLG_NONE = _sequencer_alsa.SND_SEQ_EVFLG_NONE
SND_SEQ_EVFLG_RAW = _sequencer_alsa.SND_SEQ_EVFLG_RAW
SND_SEQ_EVFLG_FIXED = _sequencer_alsa.SND_SEQ_EVFLG_FIXED
SND_SEQ_EVFLG_VARIABLE = _sequencer_alsa.SND_SEQ_EVFLG_VARIABLE
SND_SEQ_EVFLG_VARUSR = _sequencer_alsa.SND_SEQ_EVFLG_VARUSR
SND_SEQ_EVFLG_NOTE_ONEARG = _sequencer_alsa.SND_SEQ_EVFLG_NOTE_ONEARG
SND_SEQ_EVFLG_NOTE_TWOARG = _sequencer_alsa.SND_SEQ_EVFLG_NOTE_TWOARG
SND_SEQ_EVFLG_QUEUE_NOARG = _sequencer_alsa.SND_SEQ_EVFLG_QUEUE_NOARG
SND_SEQ_EVFLG_QUEUE_TICK = _sequencer_alsa.SND_SEQ_EVFLG_QUEUE_TICK
SND_SEQ_EVFLG_QUEUE_TIME = _sequencer_alsa.SND_SEQ_EVFLG_QUEUE_TIME
SND_SEQ_EVFLG_QUEUE_VALUE = _sequencer_alsa.SND_SEQ_EVFLG_QUEUE_VALUE
def snd_seq_control_queue(seq, q, type, value, ev):
return _sequencer_alsa.snd_seq_control_queue(seq, q, type, value, ev)
def snd_seq_create_simple_port(seq, name, caps, type):
return _sequencer_alsa.snd_seq_create_simple_port(seq, name, caps, type)
def snd_seq_delete_simple_port(seq, port):
return _sequencer_alsa.snd_seq_delete_simple_port(seq, port)
def snd_seq_connect_from(seq, my_port, src_client, src_port):
return _sequencer_alsa.snd_seq_connect_from(seq, my_port, src_client, src_port)
def snd_seq_connect_to(seq, my_port, dest_client, dest_port):
return _sequencer_alsa.snd_seq_connect_to(seq, my_port, dest_client, dest_port)
def snd_seq_disconnect_from(seq, my_port, src_client, src_port):
return _sequencer_alsa.snd_seq_disconnect_from(seq, my_port, src_client, src_port)
def snd_seq_disconnect_to(seq, my_port, dest_client, dest_port):
return _sequencer_alsa.snd_seq_disconnect_to(seq, my_port, dest_client, dest_port)
def snd_seq_set_client_name(seq, name):
return _sequencer_alsa.snd_seq_set_client_name(seq, name)
def snd_seq_set_client_event_filter(seq, event_type):
return _sequencer_alsa.snd_seq_set_client_event_filter(seq, event_type)
def snd_seq_set_client_pool_output(seq, size):
return _sequencer_alsa.snd_seq_set_client_pool_output(seq, size)
def snd_seq_set_client_pool_output_room(seq, size):
return _sequencer_alsa.snd_seq_set_client_pool_output_room(seq, size)
def snd_seq_set_client_pool_input(seq, size):
return _sequencer_alsa.snd_seq_set_client_pool_input(seq, size)
def snd_seq_sync_output_queue(seq):
return _sequencer_alsa.snd_seq_sync_output_queue(seq)
def snd_seq_parse_address(seq, addr, str):
return _sequencer_alsa.snd_seq_parse_address(seq, addr, str)
def snd_seq_reset_pool_output(seq):
return _sequencer_alsa.snd_seq_reset_pool_output(seq)
def snd_seq_reset_pool_input(seq):
return _sequencer_alsa.snd_seq_reset_pool_input(seq)
SND_SEQ_EVENT_SYSTEM = _sequencer_alsa.SND_SEQ_EVENT_SYSTEM
SND_SEQ_EVENT_RESULT = _sequencer_alsa.SND_SEQ_EVENT_RESULT
SND_SEQ_EVENT_NOTE = _sequencer_alsa.SND_SEQ_EVENT_NOTE
SND_SEQ_EVENT_NOTEON = _sequencer_alsa.SND_SEQ_EVENT_NOTEON
SND_SEQ_EVENT_NOTEOFF = _sequencer_alsa.SND_SEQ_EVENT_NOTEOFF
SND_SEQ_EVENT_KEYPRESS = _sequencer_alsa.SND_SEQ_EVENT_KEYPRESS
SND_SEQ_EVENT_CONTROLLER = _sequencer_alsa.SND_SEQ_EVENT_CONTROLLER
SND_SEQ_EVENT_PGMCHANGE = _sequencer_alsa.SND_SEQ_EVENT_PGMCHANGE
SND_SEQ_EVENT_CHANPRESS = _sequencer_alsa.SND_SEQ_EVENT_CHANPRESS
SND_SEQ_EVENT_PITCHBEND = _sequencer_alsa.SND_SEQ_EVENT_PITCHBEND
SND_SEQ_EVENT_CONTROL14 = _sequencer_alsa.SND_SEQ_EVENT_CONTROL14
SND_SEQ_EVENT_NONREGPARAM = _sequencer_alsa.SND_SEQ_EVENT_NONREGPARAM
SND_SEQ_EVENT_REGPARAM = _sequencer_alsa.SND_SEQ_EVENT_REGPARAM
SND_SEQ_EVENT_SONGPOS = _sequencer_alsa.SND_SEQ_EVENT_SONGPOS
SND_SEQ_EVENT_SONGSEL = _sequencer_alsa.SND_SEQ_EVENT_SONGSEL
SND_SEQ_EVENT_QFRAME = _sequencer_alsa.SND_SEQ_EVENT_QFRAME
SND_SEQ_EVENT_TIMESIGN = _sequencer_alsa.SND_SEQ_EVENT_TIMESIGN
SND_SEQ_EVENT_KEYSIGN = _sequencer_alsa.SND_SEQ_EVENT_KEYSIGN
SND_SEQ_EVENT_START = _sequencer_alsa.SND_SEQ_EVENT_START
SND_SEQ_EVENT_CONTINUE = _sequencer_alsa.SND_SEQ_EVENT_CONTINUE
SND_SEQ_EVENT_STOP = _sequencer_alsa.SND_SEQ_EVENT_STOP
SND_SEQ_EVENT_SETPOS_TICK = _sequencer_alsa.SND_SEQ_EVENT_SETPOS_TICK
SND_SEQ_EVENT_SETPOS_TIME = _sequencer_alsa.SND_SEQ_EVENT_SETPOS_TIME
SND_SEQ_EVENT_TEMPO = _sequencer_alsa.SND_SEQ_EVENT_TEMPO
SND_SEQ_EVENT_CLOCK = _sequencer_alsa.SND_SEQ_EVENT_CLOCK
SND_SEQ_EVENT_TICK = _sequencer_alsa.SND_SEQ_EVENT_TICK
SND_SEQ_EVENT_QUEUE_SKEW = _sequencer_alsa.SND_SEQ_EVENT_QUEUE_SKEW
SND_SEQ_EVENT_SYNC_POS = _sequencer_alsa.SND_SEQ_EVENT_SYNC_POS
SND_SEQ_EVENT_TUNE_REQUEST = _sequencer_alsa.SND_SEQ_EVENT_TUNE_REQUEST
SND_SEQ_EVENT_RESET = _sequencer_alsa.SND_SEQ_EVENT_RESET
SND_SEQ_EVENT_SENSING = _sequencer_alsa.SND_SEQ_EVENT_SENSING
SND_SEQ_EVENT_ECHO = _sequencer_alsa.SND_SEQ_EVENT_ECHO
SND_SEQ_EVENT_OSS = _sequencer_alsa.SND_SEQ_EVENT_OSS
SND_SEQ_EVENT_CLIENT_START = _sequencer_alsa.SND_SEQ_EVENT_CLIENT_START
SND_SEQ_EVENT_CLIENT_EXIT = _sequencer_alsa.SND_SEQ_EVENT_CLIENT_EXIT
SND_SEQ_EVENT_CLIENT_CHANGE = _sequencer_alsa.SND_SEQ_EVENT_CLIENT_CHANGE
SND_SEQ_EVENT_PORT_START = _sequencer_alsa.SND_SEQ_EVENT_PORT_START
SND_SEQ_EVENT_PORT_EXIT = _sequencer_alsa.SND_SEQ_EVENT_PORT_EXIT
SND_SEQ_EVENT_PORT_CHANGE = _sequencer_alsa.SND_SEQ_EVENT_PORT_CHANGE
SND_SEQ_EVENT_PORT_SUBSCRIBED = _sequencer_alsa.SND_SEQ_EVENT_PORT_SUBSCRIBED
SND_SEQ_EVENT_PORT_UNSUBSCRIBED = _sequencer_alsa.SND_SEQ_EVENT_PORT_UNSUBSCRIBED
SND_SEQ_EVENT_USR0 = _sequencer_alsa.SND_SEQ_EVENT_USR0
SND_SEQ_EVENT_USR1 = _sequencer_alsa.SND_SEQ_EVENT_USR1
SND_SEQ_EVENT_USR2 = _sequencer_alsa.SND_SEQ_EVENT_USR2
SND_SEQ_EVENT_USR3 = _sequencer_alsa.SND_SEQ_EVENT_USR3
SND_SEQ_EVENT_USR4 = _sequencer_alsa.SND_SEQ_EVENT_USR4
SND_SEQ_EVENT_USR5 = _sequencer_alsa.SND_SEQ_EVENT_USR5
SND_SEQ_EVENT_USR6 = _sequencer_alsa.SND_SEQ_EVENT_USR6
SND_SEQ_EVENT_USR7 = _sequencer_alsa.SND_SEQ_EVENT_USR7
SND_SEQ_EVENT_USR8 = _sequencer_alsa.SND_SEQ_EVENT_USR8
SND_SEQ_EVENT_USR9 = _sequencer_alsa.SND_SEQ_EVENT_USR9
SND_SEQ_EVENT_SYSEX = _sequencer_alsa.SND_SEQ_EVENT_SYSEX
SND_SEQ_EVENT_BOUNCE = _sequencer_alsa.SND_SEQ_EVENT_BOUNCE
SND_SEQ_EVENT_USR_VAR0 = _sequencer_alsa.SND_SEQ_EVENT_USR_VAR0
SND_SEQ_EVENT_USR_VAR1 = _sequencer_alsa.SND_SEQ_EVENT_USR_VAR1
SND_SEQ_EVENT_USR_VAR2 = _sequencer_alsa.SND_SEQ_EVENT_USR_VAR2
SND_SEQ_EVENT_USR_VAR3 = _sequencer_alsa.SND_SEQ_EVENT_USR_VAR3
SND_SEQ_EVENT_USR_VAR4 = _sequencer_alsa.SND_SEQ_EVENT_USR_VAR4
SND_SEQ_EVENT_NONE = _sequencer_alsa.SND_SEQ_EVENT_NONE
class snd_seq_addr_t(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
client = property(_sequencer_alsa.snd_seq_addr_t_client_get, _sequencer_alsa.snd_seq_addr_t_client_set)
port = property(_sequencer_alsa.snd_seq_addr_t_port_get, _sequencer_alsa.snd_seq_addr_t_port_set)
def __init__(self):
_sequencer_alsa.snd_seq_addr_t_swiginit(self, _sequencer_alsa.new_snd_seq_addr_t())
__swig_destroy__ = _sequencer_alsa.delete_snd_seq_addr_t
# Register snd_seq_addr_t in _sequencer_alsa:
_sequencer_alsa.snd_seq_addr_t_swigregister(snd_seq_addr_t)
cvar = _sequencer_alsa.cvar
snd_seq_event_types = cvar.snd_seq_event_types
class snd_seq_connect_t(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
sender = property(_sequencer_alsa.snd_seq_connect_t_sender_get, _sequencer_alsa.snd_seq_connect_t_sender_set)
dest = property(_sequencer_alsa.snd_seq_connect_t_dest_get, _sequencer_alsa.snd_seq_connect_t_dest_set)
def __init__(self):
_sequencer_alsa.snd_seq_connect_t_swiginit(self, _sequencer_alsa.new_snd_seq_connect_t())
__swig_destroy__ = _sequencer_alsa.delete_snd_seq_connect_t
# Register snd_seq_connect_t in _sequencer_alsa:
_sequencer_alsa.snd_seq_connect_t_swigregister(snd_seq_connect_t)
class snd_seq_real_time_t(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
tv_sec = property(_sequencer_alsa.snd_seq_real_time_t_tv_sec_get, _sequencer_alsa.snd_seq_real_time_t_tv_sec_set)
tv_nsec = property(_sequencer_alsa.snd_seq_real_time_t_tv_nsec_get, _sequencer_alsa.snd_seq_real_time_t_tv_nsec_set)
def __init__(self):
_sequencer_alsa.snd_seq_real_time_t_swiginit(self, _sequencer_alsa.new_snd_seq_real_time_t())
__swig_destroy__ = _sequencer_alsa.delete_snd_seq_real_time_t
# Register snd_seq_real_time_t in _sequencer_alsa:
_sequencer_alsa.snd_seq_real_time_t_swigregister(snd_seq_real_time_t)
class snd_seq_timestamp_t(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
tick = property(_sequencer_alsa.snd_seq_timestamp_t_tick_get, _sequencer_alsa.snd_seq_timestamp_t_tick_set)
time = property(_sequencer_alsa.snd_seq_timestamp_t_time_get, _sequencer_alsa.snd_seq_timestamp_t_time_set)
def __init__(self):
_sequencer_alsa.snd_seq_timestamp_t_swiginit(self, _sequencer_alsa.new_snd_seq_timestamp_t())
__swig_destroy__ = _sequencer_alsa.delete_snd_seq_timestamp_t
# Register snd_seq_timestamp_t in _sequencer_alsa:
_sequencer_alsa.snd_seq_timestamp_t_swigregister(snd_seq_timestamp_t)
SND_SEQ_TIME_STAMP_TICK = _sequencer_alsa.SND_SEQ_TIME_STAMP_TICK
SND_SEQ_TIME_STAMP_REAL = _sequencer_alsa.SND_SEQ_TIME_STAMP_REAL
SND_SEQ_TIME_STAMP_MASK = _sequencer_alsa.SND_SEQ_TIME_STAMP_MASK
SND_SEQ_TIME_MODE_ABS = _sequencer_alsa.SND_SEQ_TIME_MODE_ABS
SND_SEQ_TIME_MODE_REL = _sequencer_alsa.SND_SEQ_TIME_MODE_REL
SND_SEQ_TIME_MODE_MASK = _sequencer_alsa.SND_SEQ_TIME_MODE_MASK
SND_SEQ_EVENT_LENGTH_FIXED = _sequencer_alsa.SND_SEQ_EVENT_LENGTH_FIXED
SND_SEQ_EVENT_LENGTH_VARIABLE = _sequencer_alsa.SND_SEQ_EVENT_LENGTH_VARIABLE
SND_SEQ_EVENT_LENGTH_VARUSR = _sequencer_alsa.SND_SEQ_EVENT_LENGTH_VARUSR
SND_SEQ_EVENT_LENGTH_MASK = _sequencer_alsa.SND_SEQ_EVENT_LENGTH_MASK
SND_SEQ_PRIORITY_NORMAL = _sequencer_alsa.SND_SEQ_PRIORITY_NORMAL
SND_SEQ_PRIORITY_HIGH = _sequencer_alsa.SND_SEQ_PRIORITY_HIGH
SND_SEQ_PRIORITY_MASK = _sequencer_alsa.SND_SEQ_PRIORITY_MASK
class snd_seq_ev_note_t(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
channel = property(_sequencer_alsa.snd_seq_ev_note_t_channel_get, _sequencer_alsa.snd_seq_ev_note_t_channel_set)
note = property(_sequencer_alsa.snd_seq_ev_note_t_note_get, _sequencer_alsa.snd_seq_ev_note_t_note_set)
velocity = property(_sequencer_alsa.snd_seq_ev_note_t_velocity_get, _sequencer_alsa.snd_seq_ev_note_t_velocity_set)
off_velocity = property(_sequencer_alsa.snd_seq_ev_note_t_off_velocity_get, _sequencer_alsa.snd_seq_ev_note_t_off_velocity_set)
duration = property(_sequencer_alsa.snd_seq_ev_note_t_duration_get, _sequencer_alsa.snd_seq_ev_note_t_duration_set)
def __init__(self):
_sequencer_alsa.snd_seq_ev_note_t_swiginit(self, _sequencer_alsa.new_snd_seq_ev_note_t())
__swig_destroy__ = _sequencer_alsa.delete_snd_seq_ev_note_t
# Register snd_seq_ev_note_t in _sequencer_alsa:
_sequencer_alsa.snd_seq_ev_note_t_swigregister(snd_seq_ev_note_t)
class snd_seq_ev_ctrl_t(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
channel = property(_sequencer_alsa.snd_seq_ev_ctrl_t_channel_get, _sequencer_alsa.snd_seq_ev_ctrl_t_channel_set)
unused = property(_sequencer_alsa.snd_seq_ev_ctrl_t_unused_get, _sequencer_alsa.snd_seq_ev_ctrl_t_unused_set)
param = property(_sequencer_alsa.snd_seq_ev_ctrl_t_param_get, _sequencer_alsa.snd_seq_ev_ctrl_t_param_set)
value = property(_sequencer_alsa.snd_seq_ev_ctrl_t_value_get, _sequencer_alsa.snd_seq_ev_ctrl_t_value_set)
def __init__(self):
_sequencer_alsa.snd_seq_ev_ctrl_t_swiginit(self, _sequencer_alsa.new_snd_seq_ev_ctrl_t())
__swig_destroy__ = _sequencer_alsa.delete_snd_seq_ev_ctrl_t
# Register snd_seq_ev_ctrl_t in _sequencer_alsa:
_sequencer_alsa.snd_seq_ev_ctrl_t_swigregister(snd_seq_ev_ctrl_t)
class snd_seq_ev_raw8_t(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
d = property(_sequencer_alsa.snd_seq_ev_raw8_t_d_get, _sequencer_alsa.snd_seq_ev_raw8_t_d_set)
def __init__(self):
_sequencer_alsa.snd_seq_ev_raw8_t_swiginit(self, _sequencer_alsa.new_snd_seq_ev_raw8_t())
__swig_destroy__ = _sequencer_alsa.delete_snd_seq_ev_raw8_t
# Register snd_seq_ev_raw8_t in _sequencer_alsa:
_sequencer_alsa.snd_seq_ev_raw8_t_swigregister(snd_seq_ev_raw8_t)
class snd_seq_ev_raw32_t(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
d = property(_sequencer_alsa.snd_seq_ev_raw32_t_d_get, _sequencer_alsa.snd_seq_ev_raw32_t_d_set)
def __init__(self):
_sequencer_alsa.snd_seq_ev_raw32_t_swiginit(self, _sequencer_alsa.new_snd_seq_ev_raw32_t())
__swig_destroy__ = _sequencer_alsa.delete_snd_seq_ev_raw32_t
# Register snd_seq_ev_raw32_t in _sequencer_alsa:
_sequencer_alsa.snd_seq_ev_raw32_t_swigregister(snd_seq_ev_raw32_t)
class snd_seq_ev_ext_t(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
len = property(_sequencer_alsa.snd_seq_ev_ext_t_len_get, _sequencer_alsa.snd_seq_ev_ext_t_len_set)
ptr = property(_sequencer_alsa.snd_seq_ev_ext_t_ptr_get, _sequencer_alsa.snd_seq_ev_ext_t_ptr_set)
def __init__(self):
_sequencer_alsa.snd_seq_ev_ext_t_swiginit(self, _sequencer_alsa.new_snd_seq_ev_ext_t())
__swig_destroy__ = _sequencer_alsa.delete_snd_seq_ev_ext_t
# Register snd_seq_ev_ext_t in _sequencer_alsa:
_sequencer_alsa.snd_seq_ev_ext_t_swigregister(snd_seq_ev_ext_t)
class snd_seq_result_t(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
event = property(_sequencer_alsa.snd_seq_result_t_event_get, _sequencer_alsa.snd_seq_result_t_event_set)
result = property(_sequencer_alsa.snd_seq_result_t_result_get, _sequencer_alsa.snd_seq_result_t_result_set)
def __init__(self):
_sequencer_alsa.snd_seq_result_t_swiginit(self, _sequencer_alsa.new_snd_seq_result_t())
__swig_destroy__ = _sequencer_alsa.delete_snd_seq_result_t
# Register snd_seq_result_t in _sequencer_alsa:
_sequencer_alsa.snd_seq_result_t_swigregister(snd_seq_result_t)
class snd_seq_queue_skew_t(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
value = property(_sequencer_alsa.snd_seq_queue_skew_t_value_get, _sequencer_alsa.snd_seq_queue_skew_t_value_set)
base = property(_sequencer_alsa.snd_seq_queue_skew_t_base_get, _sequencer_alsa.snd_seq_queue_skew_t_base_set)
def __init__(self):
_sequencer_alsa.snd_seq_queue_skew_t_swiginit(self, _sequencer_alsa.new_snd_seq_queue_skew_t())
__swig_destroy__ = _sequencer_alsa.delete_snd_seq_queue_skew_t
# Register snd_seq_queue_skew_t in _sequencer_alsa:
_sequencer_alsa.snd_seq_queue_skew_t_swigregister(snd_seq_queue_skew_t)
class snd_seq_ev_queue_control_t(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
queue = property(_sequencer_alsa.snd_seq_ev_queue_control_t_queue_get, _sequencer_alsa.snd_seq_ev_queue_control_t_queue_set)
unused = property(_sequencer_alsa.snd_seq_ev_queue_control_t_unused_get, _sequencer_alsa.snd_seq_ev_queue_control_t_unused_set)
param = property(_sequencer_alsa.snd_seq_ev_queue_control_t_param_get)
def __init__(self):
_sequencer_alsa.snd_seq_ev_queue_control_t_swiginit(self, _sequencer_alsa.new_snd_seq_ev_queue_control_t())
__swig_destroy__ = _sequencer_alsa.delete_snd_seq_ev_queue_control_t
# Register snd_seq_ev_queue_control_t in _sequencer_alsa:
_sequencer_alsa.snd_seq_ev_queue_control_t_swigregister(snd_seq_ev_queue_control_t)
class snd_seq_ev_queue_control_param(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
value = property(_sequencer_alsa.snd_seq_ev_queue_control_param_value_get, _sequencer_alsa.snd_seq_ev_queue_control_param_value_set)
time = property(_sequencer_alsa.snd_seq_ev_queue_control_param_time_get, _sequencer_alsa.snd_seq_ev_queue_control_param_time_set)
position = property(_sequencer_alsa.snd_seq_ev_queue_control_param_position_get, _sequencer_alsa.snd_seq_ev_queue_control_param_position_set)
skew = property(_sequencer_alsa.snd_seq_ev_queue_control_param_skew_get, _sequencer_alsa.snd_seq_ev_queue_control_param_skew_set)
d32 = property(_sequencer_alsa.snd_seq_ev_queue_control_param_d32_get, _sequencer_alsa.snd_seq_ev_queue_control_param_d32_set)
d8 = property(_sequencer_alsa.snd_seq_ev_queue_control_param_d8_get, _sequencer_alsa.snd_seq_ev_queue_control_param_d8_set)
def __init__(self):
_sequencer_alsa.snd_seq_ev_queue_control_param_swiginit(self, _sequencer_alsa.new_snd_seq_ev_queue_control_param())
__swig_destroy__ = _sequencer_alsa.delete_snd_seq_ev_queue_control_param
# Register snd_seq_ev_queue_control_param in _sequencer_alsa:
_sequencer_alsa.snd_seq_ev_queue_control_param_swigregister(snd_seq_ev_queue_control_param)
class snd_seq_event_t(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
type = property(_sequencer_alsa.snd_seq_event_t_type_get, _sequencer_alsa.snd_seq_event_t_type_set)
flags = property(_sequencer_alsa.snd_seq_event_t_flags_get, _sequencer_alsa.snd_seq_event_t_flags_set)
tag = property(_sequencer_alsa.snd_seq_event_t_tag_get, _sequencer_alsa.snd_seq_event_t_tag_set)
queue = property(_sequencer_alsa.snd_seq_event_t_queue_get, _sequencer_alsa.snd_seq_event_t_queue_set)
time = property(_sequencer_alsa.snd_seq_event_t_time_get, _sequencer_alsa.snd_seq_event_t_time_set)
source = property(_sequencer_alsa.snd_seq_event_t_source_get, _sequencer_alsa.snd_seq_event_t_source_set)
dest = property(_sequencer_alsa.snd_seq_event_t_dest_get, _sequencer_alsa.snd_seq_event_t_dest_set)
data = property(_sequencer_alsa.snd_seq_event_t_data_get)
def __init__(self):
_sequencer_alsa.snd_seq_event_t_swiginit(self, _sequencer_alsa.new_snd_seq_event_t())
__swig_destroy__ = _sequencer_alsa.delete_snd_seq_event_t
# Register snd_seq_event_t in _sequencer_alsa:
_sequencer_alsa.snd_seq_event_t_swigregister(snd_seq_event_t)
class snd_seq_event_data(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
note = property(_sequencer_alsa.snd_seq_event_data_note_get, _sequencer_alsa.snd_seq_event_data_note_set)
control = property(_sequencer_alsa.snd_seq_event_data_control_get, _sequencer_alsa.snd_seq_event_data_control_set)
raw8 = property(_sequencer_alsa.snd_seq_event_data_raw8_get, _sequencer_alsa.snd_seq_event_data_raw8_set)
raw32 = property(_sequencer_alsa.snd_seq_event_data_raw32_get, _sequencer_alsa.snd_seq_event_data_raw32_set)
ext = property(_sequencer_alsa.snd_seq_event_data_ext_get, _sequencer_alsa.snd_seq_event_data_ext_set)
queue = property(_sequencer_alsa.snd_seq_event_data_queue_get, _sequencer_alsa.snd_seq_event_data_queue_set)
time = property(_sequencer_alsa.snd_seq_event_data_time_get, _sequencer_alsa.snd_seq_event_data_time_set)
addr = property(_sequencer_alsa.snd_seq_event_data_addr_get, _sequencer_alsa.snd_seq_event_data_addr_set)
connect = property(_sequencer_alsa.snd_seq_event_data_connect_get, _sequencer_alsa.snd_seq_event_data_connect_set)
result = property(_sequencer_alsa.snd_seq_event_data_result_get, _sequencer_alsa.snd_seq_event_data_result_set)
def __init__(self):
_sequencer_alsa.snd_seq_event_data_swiginit(self, _sequencer_alsa.new_snd_seq_event_data())
__swig_destroy__ = _sequencer_alsa.delete_snd_seq_event_data
# Register snd_seq_event_data in _sequencer_alsa:
_sequencer_alsa.snd_seq_event_data_swigregister(snd_seq_event_data)
def snd_midi_event_new(bufsize, rdev):
return _sequencer_alsa.snd_midi_event_new(bufsize, rdev)
def snd_midi_event_resize_buffer(dev, bufsize):
return _sequencer_alsa.snd_midi_event_resize_buffer(dev, bufsize)
def snd_midi_event_free(dev):
return _sequencer_alsa.snd_midi_event_free(dev)
def snd_midi_event_init(dev):
return _sequencer_alsa.snd_midi_event_init(dev)
def snd_midi_event_reset_encode(dev):
return _sequencer_alsa.snd_midi_event_reset_encode(dev)
def snd_midi_event_reset_decode(dev):
return _sequencer_alsa.snd_midi_event_reset_decode(dev)
def snd_midi_event_no_status(dev, on):
return _sequencer_alsa.snd_midi_event_no_status(dev, on)
def snd_midi_event_encode(dev, buf, count, ev):
return _sequencer_alsa.snd_midi_event_encode(dev, buf, count, ev)
def snd_midi_event_encode_byte(dev, c, ev):
return _sequencer_alsa.snd_midi_event_encode_byte(dev, c, ev)
def snd_midi_event_decode(dev, buf, count, ev):
return _sequencer_alsa.snd_midi_event_decode(dev, buf, count, ev)
SND_ERROR_BEGIN = _sequencer_alsa.SND_ERROR_BEGIN
SND_ERROR_INCOMPATIBLE_VERSION = _sequencer_alsa.SND_ERROR_INCOMPATIBLE_VERSION
SND_ERROR_ALISP_NIL = _sequencer_alsa.SND_ERROR_ALISP_NIL
def snd_strerror(errnum):
return _sequencer_alsa.snd_strerror(errnum)
def snd_lib_error_set_handler(handler):
return _sequencer_alsa.snd_lib_error_set_handler(handler)
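# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated bindings). A minimal example of
# driving the queue/tempo wrappers above, assuming the compiled
# `_sequencer_alsa` extension is importable, `seq` is an already-open
# sequencer handle, and `tempo_info` is an allocated snd_seq_queue_tempo_t
# (how those are obtained depends on the SWIG typemaps and is not shown).
# ---------------------------------------------------------------------------
def _example_set_queue_to_120_bpm(seq, tempo_info):
    """Hedged sketch: allocate a queue and set it to 120 BPM at 96 PPQ."""
    queue = snd_seq_alloc_queue(seq)  # ALSA convention: negative return = error
    if queue < 0:
        raise RuntimeError(snd_strerror(queue))
    # ALSA expresses tempo as microseconds per quarter note: 60e6 / 120 BPM.
    snd_seq_queue_tempo_set_tempo(tempo_info, 500000)
    snd_seq_queue_tempo_set_ppq(tempo_info, 96)
    err = snd_seq_set_queue_tempo(seq, queue, tempo_info)
    if err < 0:
        raise RuntimeError(snd_strerror(err))
    return queue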
# ---------------------------------------------------------------------------
# Next file: nodes/IGHighMap/__init__.py
# (repo bertrandboudaud/imagegraph, MIT license,
#  blob 73731a3520208f0959e75a527969e4db37dc6523)
# ---------------------------------------------------------------------------
from . import IGHighMap
def get():
return IGHighMap.IGHighMap()
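# Hedged usage note: the zero-argument factory above lets a loader
# (presumably imagegraph's node registry) instantiate the node without
# importing the class directly, e.g. `node = get()`.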
# ---------------------------------------------------------------------------
# Next file: benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_ml/SystemIPC/cmp_namd/power.py
# (repo TugberkArkose/MLScheduler, Unlicense,
#  blob 73734965ee239caea342ed62f73651915f648f11)
# ---------------------------------------------------------------------------
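# Per-component power/area estimates keyed by slash-separated component
# paths. Each component carries Area, Gate Leakage, Peak Dynamic, Runtime
# Dynamic, and Subthreshold Leakage (with and without power gating);
# presumably mm^2 for area and watts for power. The layout resembles
# McPAT simulator output, though that provenance is an inference.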
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.413815,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.527717,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 2.2802,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.843787,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.46113,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.838002,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 3.14292,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.484461,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 9.91506,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.430778,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0305879,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.374266,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.226217,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.805044,
'Execution Unit/Register Files/Runtime Dynamic': 0.256805,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 1.01775,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 2.18347,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 6.51629,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00156459,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00156459,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0013689,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00053328,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00324962,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0077477,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0147818,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.217468,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.452419,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.738619,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.43103,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.10202,
'L2/Runtime Dynamic': 0.0217443,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 6.5458,
'Load Store Unit/Data Cache/Runtime Dynamic': 2.55748,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.171748,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.171748,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 7.36014,
'Load Store Unit/Runtime Dynamic': 3.57624,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.423502,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.847004,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.150302,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.151834,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0741683,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.818338,
'Memory Management Unit/Runtime Dynamic': 0.226002,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 31.726,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 1.50289,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0612313,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.40791,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 1.97203,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 13.7433,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.137168,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.310426,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.756036,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.242122,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.390534,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.197129,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.829785,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.161006,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.46132,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.142831,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0101557,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.12417,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0751076,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.267001,
'Execution Unit/Register Files/Runtime Dynamic': 0.0852633,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.295891,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.639282,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.27013,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000516453,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000516453,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000450503,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000174765,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00107893,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00256234,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00492765,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0722029,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.59272,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.150289,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.245233,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 7.03413,
'Instruction Fetch Unit/Runtime Dynamic': 0.475215,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0333705,
'L2/Runtime Dynamic': 0.00711534,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.9994,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.849072,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0570141,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.057014,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.26864,
'Load Store Unit/Runtime Dynamic': 1.18726,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.140587,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.281173,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0498948,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0503957,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.285559,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0246379,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.527378,
'Memory Management Unit/Runtime Dynamic': 0.0750337,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 19.9143,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.375724,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0154964,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.115451,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.506672,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 4.52143,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.165662,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.332807,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.912966,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.292137,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.471206,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.237849,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.00119,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.19415,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.801,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.172479,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0122535,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.149884,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0906225,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.322363,
'Execution Unit/Register Files/Runtime Dynamic': 0.102876,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.357188,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.77159,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.61384,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000621927,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000621927,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000542531,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000210479,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.0013018,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00308818,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0059332,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0871177,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.54143,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.181253,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.295891,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.02888,
'Instruction Fetch Unit/Runtime Dynamic': 0.573283,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0405312,
'L2/Runtime Dynamic': 0.00864609,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.36372,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.02455,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0688007,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0688006,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.68861,
'Load Store Unit/Runtime Dynamic': 1.43265,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.169651,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.339301,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0602096,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0608181,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.344546,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.029714,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.604084,
'Memory Management Unit/Runtime Dynamic': 0.0905321,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 21.7526,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.453713,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.018702,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.139289,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.611704,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 5.33066,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.164452,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.331857,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.906315,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.290033,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.467812,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.236136,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.99398,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.192763,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.78663,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.171222,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0121653,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.148797,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0899696,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.320019,
'Execution Unit/Register Files/Runtime Dynamic': 0.102135,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.354596,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.766004,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.59935,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00061761,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00061761,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000538769,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000209021,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00129242,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00306641,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00589187,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0864901,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.50151,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.179957,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.293759,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 7.98703,
'Instruction Fetch Unit/Runtime Dynamic': 0.569164,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0401139,
'L2/Runtime Dynamic': 0.00855802,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.34834,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.01714,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0683029,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.068303,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.67088,
'Load Store Unit/Runtime Dynamic': 1.42229,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.168423,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.336847,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.059774,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0603764,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.342064,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0295015,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.600854,
'Memory Management Unit/Runtime Dynamic': 0.0898779,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 21.675,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.450408,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0185668,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.138287,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.607261,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 5.2965,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 0.05289589625922553,
'Runtime Dynamic': 0.05289589625922553,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.00386633,
'Runtime Dynamic': 0.0011832,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 95.0717,
'Peak Power': 128.184,
'Runtime Dynamic': 28.8931,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 95.0678,
'Total Cores/Runtime Dynamic': 28.8919,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.00386633,
'Total L3s/Runtime Dynamic': 0.0011832,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
| 75.038293
| 124
| 0.681971
| 8,082
| 68,585
| 5.781366
| 0.067558
| 0.123617
| 0.113002
| 0.093483
| 0.938769
| 0.930744
| 0.918095
| 0.884922
| 0.861209
| 0.842804
| 0
| 0.131556
| 0.224408
| 68,585
| 914
| 125
| 75.038293
| 0.746832
| 0
| 0
| 0.642232
| 0
| 0
| 0.657642
| 0.048115
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 73849363440a8be10ac43c61b547677fd7a2a96d
| 83,677
| py
| Python
| lib/net/rcnn_net.py
| chuchis/PointRCNN
| 8b4164ca9bb401c06ce74f11b1b76c9d2bc79b13
| [ "MIT" ] | null | null | null |
lib/net/rcnn_net.py
| chuchis/PointRCNN
| 8b4164ca9bb401c06ce74f11b1b76c9d2bc79b13
| [ "MIT" ] | null | null | null |
lib/net/rcnn_net.py
| chuchis/PointRCNN
| 8b4164ca9bb401c06ce74f11b1b76c9d2bc79b13
| [ "MIT" ] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from pointnet2_lib.pointnet2.pointnet2_modules import PointnetSAModule
from lib.rpn.proposal_target_layer import ProposalTargetLayer
import pointnet2_lib.pointnet2.pytorch_utils as pt_utils
import lib.utils.loss_utils as loss_utils
from lib.config import cfg
import numpy as np
# from PIL import Image
# import png
import lib.utils.kitti_utils as kitti_utils
import lib.utils.roipool3d.roipool3d_utils as roipool3d_utils
from gcn_lib.dense import BasicConv, GraphConv2d, ResDynBlock2d, DenseDynBlock2d, DenseDilatedKnnGraph, ResBlock2d, DynConv2d
from torch.nn import Sequential as Seq
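# RCNN refinement networks for PointRCNN with graph-convolution backbones:
# DenseDeepGCN (a DeepGCN-style dense/residual dynamic-graph backbone),
# DenseRCNN / DenseFeatRCNN (ROI classification + box regression heads on top
# of it), GCNNet (an EdgeConv-style alternative), and RotProjNet / RotRefModule
# (multi-view rotational projection experiments).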
class DenseDeepGCN(torch.nn.Module):
def __init__(self, opt):
super(DenseDeepGCN, self).__init__()
channels = opt.n_filters
k = opt.kernel_size
act = opt.act
norm = opt.norm
bias = opt.bias
epsilon = opt.epsilon
stochastic = opt.stochastic
conv = opt.conv
c_growth = channels
self.n_blocks = opt.n_blocks
        self.head_xyz = opt.head  # if True, build the k-NN graph from xyz coordinates only
self.knn = DenseDilatedKnnGraph(k, 1, stochastic, epsilon)
self.head = GraphConv2d(opt.in_channels, channels, conv, act, norm, bias)
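        # Dilation schedule for the dynamic-graph blocks below: constant (always 1),
        # linear (i + 1 for block i), or the default cyclic schedule ((i % 4) + 1).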
if opt.constant_dilation:
self.dilation = lambda x: 1
else:
if opt.linear_dilation:
self.dilation = lambda x: x+1
else:
self.dilation = lambda x: (x%4)+1
if opt.block.lower() == 'res':
self.backbone = Seq(*[ResDynBlock2d(channels, k, self.dilation(i), conv, act, norm, bias, stochastic, epsilon)
for i in range(self.n_blocks-1)])
elif opt.block.lower() == 'dense':
self.backbone = Seq(*[DenseDynBlock2d(channels+c_growth*i, c_growth, k, self.dilation(i), conv, act,
norm, bias, stochastic, epsilon)
for i in range(self.n_blocks-1)])
elif opt.block.lower() == 'res_fixed':
self.backbone = Seq(*[ResBlock2d(channels, k, self.dilation(i), conv, act, norm, bias, stochastic, epsilon)
for i in range(self.n_blocks-1)])
elif opt.block.lower() == 'no_res':
self.backbone = Seq(*[DynConv2d(channels, channels, k, self.dilation(i), conv, act, norm, bias, stochastic, epsilon)
for i in range(self.n_blocks-1)])
else:
raise NotImplementedError('{} is not implemented. Please check.\n'.format(opt.block))
self.block = opt.block.lower()
self.fusion_block = BasicConv([channels+c_growth*(self.n_blocks-1), 1024], act, norm, bias)
self.channel_out = 1024+channels+c_growth*(self.n_blocks-1)
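        # Output channels: the 1024-d fused global feature is broadcast back to every
        # point and concatenated with the stacked per-block features (see forward()).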
self.model_init()
def model_init(self):
for m in self.modules():
if isinstance(m, torch.nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
m.weight.requires_grad = True
if m.bias is not None:
m.bias.data.zero_()
m.bias.requires_grad = True
def forward(self, inputs):
# print(inputs.shape)
#(B,C,N,1)
if self.head_xyz:
knn_input = inputs[:, 0:3]
else:
knn_input = inputs
feats = [self.head(inputs, self.knn(knn_input))]
# print(inputs.shape)
for i in range(self.n_blocks-1):
# print(feats[-1].shape)
# print(i)
if self.block == 'res_fixed':
feats.append(self.backbone[i](feats[-1], knn_input))
else:
feats.append(self.backbone[i](feats[-1]))
feats = torch.cat(feats, dim=1)
fusion = torch.max_pool2d(self.fusion_block(feats), kernel_size=[feats.shape[2], feats.shape[3]])
fusion = torch.repeat_interleave(fusion, repeats=feats.shape[2], dim=2)
return torch.cat((fusion, feats), dim=1)
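# Shape sketch for DenseDeepGCN.forward (a reading of the code above, not part of
# the original): inputs (B, C_in, N, 1) -> per-block features concatenated to
# (B, channels + c_growth*(n_blocks-1), N, 1) -> global max-pooled fusion
# (B, 1024, 1, 1), repeated along N and concatenated -> (B, channel_out, N, 1).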
class DenseOpts():
def __init__(self):
self.n_filters = cfg.RCNN.DEEPGCN_CONFIG.N_FILTERS
self.kernel_size = cfg.RCNN.DEEPGCN_CONFIG.KERNEL_SIZE
self.act = 'relu'
self.norm = 'batch'
self.bias = True
self.epsilon = 0.2
self.stochastic = True
self.conv = cfg.RCNN.DEEPGCN_CONFIG.CONV # edge, mr
self.n_blocks = cfg.RCNN.DEEPGCN_CONFIG.N_BLOCKS
self.in_channels = 3
self.block = cfg.RCNN.DEEPGCN_CONFIG.BLOCK
self.head = cfg.RCNN.DEEPGCN_CONFIG.HEAD
self.constant_dilation = cfg.RCNN.DEEPGCN_CONFIG.CONSTANT_DILATION
self.linear_dilation = cfg.RCNN.DEEPGCN_CONFIG.LINEAR_DILATION
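# DenseOpts adapts the global cfg into the option namespace DenseDeepGCN expects;
# callers may override fields before constructing the backbone, e.g. (hypothetical):
#   opt = DenseOpts(); opt.in_channels = 128; backbone = DenseDeepGCN(opt)
# which is exactly what DenseFeatRCNN does below.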
class DenseRCNN(nn.Module):
def __init__(self, num_classes, input_channels=0, use_xyz=True):
super().__init__()
opt = DenseOpts()
self.backbone = DenseDeepGCN(opt)
# classification layer
cls_channel = 1 if num_classes == 2 else num_classes
cls_layers = []
channel_in = self.backbone.channel_out
pre_channel = channel_in
for k in range(0, cfg.RCNN.CLS_FC.__len__()):
cls_layers.append(pt_utils.Conv1d(pre_channel, cfg.RCNN.CLS_FC[k], bn=cfg.RCNN.USE_BN))
pre_channel = cfg.RCNN.CLS_FC[k]
cls_layers.append(pt_utils.Conv1d(pre_channel, cls_channel, activation=None))
if cfg.RCNN.DP_RATIO >= 0:
cls_layers.insert(1, nn.Dropout(cfg.RCNN.DP_RATIO))
self.cls_layer = nn.Sequential(*cls_layers)
if cfg.RCNN.LOSS_CLS == 'SigmoidFocalLoss':
self.cls_loss_func = loss_utils.SigmoidFocalClassificationLoss(alpha=cfg.RCNN.FOCAL_ALPHA[0],
gamma=cfg.RCNN.FOCAL_GAMMA)
elif cfg.RCNN.LOSS_CLS == 'BinaryCrossEntropy':
self.cls_loss_func = F.binary_cross_entropy
elif cfg.RCNN.LOSS_CLS == 'CrossEntropy':
cls_weight = torch.from_numpy(cfg.RCNN.CLS_WEIGHT).float()
            self.cls_loss_func = nn.CrossEntropyLoss(ignore_index=-1, reduction='none', weight=cls_weight)
else:
raise NotImplementedError
# regression layer
per_loc_bin_num = int(cfg.RCNN.LOC_SCOPE / cfg.RCNN.LOC_BIN_SIZE) * 2
loc_y_bin_num = int(cfg.RCNN.LOC_Y_SCOPE / cfg.RCNN.LOC_Y_BIN_SIZE) * 2
reg_channel = per_loc_bin_num * 4 + cfg.RCNN.NUM_HEAD_BIN * 2 + 3
reg_channel += (1 if not cfg.RCNN.LOC_Y_BY_BIN else loc_y_bin_num * 2)
reg_layers = []
pre_channel = channel_in
for k in range(0, cfg.RCNN.REG_FC.__len__()):
reg_layers.append(pt_utils.Conv1d(pre_channel, cfg.RCNN.REG_FC[k], bn=cfg.RCNN.USE_BN))
pre_channel = cfg.RCNN.REG_FC[k]
reg_layers.append(pt_utils.Conv1d(pre_channel, reg_channel, activation=None))
if cfg.RCNN.DP_RATIO >= 0:
reg_layers.insert(1, nn.Dropout(cfg.RCNN.DP_RATIO))
self.reg_layer = nn.Sequential(*reg_layers)
self.proposal_target_layer = ProposalTargetLayer()
self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.reg_layer[-1].conv.weight, mean=0, std=0.001)
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
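    # _break_up_pc splits a (B, N, 3 + C) point cloud into xyz (B, N, 3) and,
    # when extra channels exist, features transposed to (B, C, N); otherwise None.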
def forward(self, input_data):
"""
:param input_data: input dict
:return:
"""
if cfg.RCNN.ROI_SAMPLE_JIT:
if self.training:
with torch.no_grad():
target_dict = self.proposal_target_layer(input_data)
pts_input = torch.cat((target_dict['sampled_pts'], target_dict['pts_feature']), dim=2)
target_dict['pts_input'] = pts_input
else:
rpn_xyz, rpn_features = input_data['rpn_xyz'], input_data['rpn_features']
batch_rois = input_data['roi_boxes3d']
if cfg.RCNN.USE_INTENSITY:
pts_extra_input_list = [input_data['rpn_intensity'].unsqueeze(dim=2),
input_data['seg_mask'].unsqueeze(dim=2)]
else:
pts_extra_input_list = [input_data['seg_mask'].unsqueeze(dim=2)]
if cfg.RCNN.USE_DEPTH:
pts_depth = input_data['pts_depth'] / 70.0 - 0.5
pts_extra_input_list.append(pts_depth.unsqueeze(dim=2))
pts_extra_input = torch.cat(pts_extra_input_list, dim=2)
pts_feature = torch.cat((pts_extra_input, rpn_features), dim=2)
pooled_features, pooled_empty_flag = \
roipool3d_utils.roipool3d_gpu(rpn_xyz, pts_feature, batch_rois, cfg.RCNN.POOL_EXTRA_WIDTH,
sampled_pt_num=cfg.RCNN.NUM_POINTS)
# canonical transformation
batch_size = batch_rois.shape[0]
roi_center = batch_rois[:, :, 0:3]
pooled_features[:, :, :, 0:3] -= roi_center.unsqueeze(dim=2)
for k in range(batch_size):
pooled_features[k, :, :, 0:3] = kitti_utils.rotate_pc_along_y_torch(pooled_features[k, :, :, 0:3],
batch_rois[k, :, 6])
pts_input = pooled_features.view(-1, pooled_features.shape[2], pooled_features.shape[3])
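                # The canonical transformation above moves each ROI's points into a
                # box-local frame: translated so the box center is at the origin, then
                # rotated about the y axis by the box heading (roi_boxes3d[..., 6]).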
else:
            # Hard-coded ROI sampling shape: 512 sampled points per ROI, 133 channels each.
            pts_input = input_data['pts_input'].view(-1, 512, 133)
            target_dict = {}
            target_dict['pts_input'] = input_data['pts_input'].view(-1, 512, 133)
            target_dict['roi_boxes3d'] = input_data['roi_boxes3d'].view(-1, 7)
if self.training:
target_dict['cls_label'] = input_data['cls_label'].view(-1)
target_dict['reg_valid_mask'] = input_data['reg_valid_mask'].view(-1)
target_dict['gt_of_rois'] = input_data['gt_boxes3d_ct'].view(-1,7)
xyz, features = self._break_up_pc(pts_input)
# print(xyz)
# print(xyz.shape)
pt_features = self.backbone(xyz.transpose(1,2).contiguous().unsqueeze(3))
features = torch.max(pt_features, dim=2)[0]
# print(features.shape)
rcnn_cls = self.cls_layer(features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
rcnn_reg = self.reg_layer(features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
ret_dict = {'rcnn_cls': rcnn_cls, 'rcnn_reg': rcnn_reg}
if self.training:
ret_dict.update(target_dict)
return ret_dict
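# DenseRCNN.forward returns {'rcnn_cls': (B, 1 or num_classes), 'rcnn_reg': (B, C_reg)},
# plus the proposal-target dict during training; C_reg is the bin-based box encoding
# assembled in __init__.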
class DenseFeatRCNN(nn.Module):
def __init__(self, num_classes, input_channels=0, use_xyz=True):
super().__init__()
if cfg.RCNN.USE_RPN_FEATURES:
self.rcnn_input_channel = 3 + int(cfg.RCNN.USE_INTENSITY) + int(cfg.RCNN.USE_MASK) + int(cfg.RCNN.USE_DEPTH)
self.xyz_up_layer = pt_utils.SharedMLP([self.rcnn_input_channel] + cfg.RCNN.XYZ_UP_LAYER,
bn=cfg.RCNN.USE_BN)
c_out = cfg.RCNN.XYZ_UP_LAYER[-1]
self.merge_down_layer = pt_utils.SharedMLP([c_out * 2, c_out], bn=cfg.RCNN.USE_BN)
opt = DenseOpts()
opt.in_channels = input_channels
self.backbone = DenseDeepGCN(opt)
# classification layer
cls_channel = 1 if num_classes == 2 else num_classes
cls_layers = []
channel_in = self.backbone.channel_out
pre_channel = channel_in
for k in range(0, cfg.RCNN.CLS_FC.__len__()):
cls_layers.append(pt_utils.Conv1d(pre_channel, cfg.RCNN.CLS_FC[k], bn=cfg.RCNN.USE_BN))
pre_channel = cfg.RCNN.CLS_FC[k]
cls_layers.append(pt_utils.Conv1d(pre_channel, cls_channel, activation=None))
if cfg.RCNN.DP_RATIO >= 0:
cls_layers.insert(1, nn.Dropout(cfg.RCNN.DP_RATIO))
self.cls_layer = nn.Sequential(*cls_layers)
if cfg.RCNN.LOSS_CLS == 'SigmoidFocalLoss':
self.cls_loss_func = loss_utils.SigmoidFocalClassificationLoss(alpha=cfg.RCNN.FOCAL_ALPHA[0],
gamma=cfg.RCNN.FOCAL_GAMMA)
elif cfg.RCNN.LOSS_CLS == 'BinaryCrossEntropy':
self.cls_loss_func = F.binary_cross_entropy
elif cfg.RCNN.LOSS_CLS == 'CrossEntropy':
cls_weight = torch.from_numpy(cfg.RCNN.CLS_WEIGHT).float()
            self.cls_loss_func = nn.CrossEntropyLoss(ignore_index=-1, reduction='none', weight=cls_weight)
else:
raise NotImplementedError
# regression layer
per_loc_bin_num = int(cfg.RCNN.LOC_SCOPE / cfg.RCNN.LOC_BIN_SIZE) * 2
loc_y_bin_num = int(cfg.RCNN.LOC_Y_SCOPE / cfg.RCNN.LOC_Y_BIN_SIZE) * 2
reg_channel = per_loc_bin_num * 4 + cfg.RCNN.NUM_HEAD_BIN * 2 + 3
reg_channel += (1 if not cfg.RCNN.LOC_Y_BY_BIN else loc_y_bin_num * 2)
reg_layers = []
pre_channel = channel_in
for k in range(0, cfg.RCNN.REG_FC.__len__()):
reg_layers.append(pt_utils.Conv1d(pre_channel, cfg.RCNN.REG_FC[k], bn=cfg.RCNN.USE_BN))
pre_channel = cfg.RCNN.REG_FC[k]
reg_layers.append(pt_utils.Conv1d(pre_channel, reg_channel, activation=None))
if cfg.RCNN.DP_RATIO >= 0:
reg_layers.insert(1, nn.Dropout(cfg.RCNN.DP_RATIO))
self.reg_layer = nn.Sequential(*reg_layers)
self.proposal_target_layer = ProposalTargetLayer()
self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.reg_layer[-1].conv.weight, mean=0, std=0.001)
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
def forward(self, input_data):
"""
:param input_data: input dict
:return:
"""
if cfg.RCNN.ROI_SAMPLE_JIT:
if self.training:
with torch.no_grad():
target_dict = self.proposal_target_layer(input_data)
pts_input = torch.cat((target_dict['sampled_pts'], target_dict['pts_feature']), dim=2)
target_dict['pts_input'] = pts_input
else:
rpn_xyz, rpn_features = input_data['rpn_xyz'], input_data['rpn_features']
batch_rois = input_data['roi_boxes3d']
if cfg.RCNN.USE_INTENSITY:
pts_extra_input_list = [input_data['rpn_intensity'].unsqueeze(dim=2),
input_data['seg_mask'].unsqueeze(dim=2)]
else:
pts_extra_input_list = [input_data['seg_mask'].unsqueeze(dim=2)]
if cfg.RCNN.USE_DEPTH:
pts_depth = input_data['pts_depth'] / 70.0 - 0.5
pts_extra_input_list.append(pts_depth.unsqueeze(dim=2))
pts_extra_input = torch.cat(pts_extra_input_list, dim=2)
pts_feature = torch.cat((pts_extra_input, rpn_features), dim=2)
pooled_features, pooled_empty_flag = \
roipool3d_utils.roipool3d_gpu(rpn_xyz, pts_feature, batch_rois, cfg.RCNN.POOL_EXTRA_WIDTH,
sampled_pt_num=cfg.RCNN.NUM_POINTS)
# canonical transformation
batch_size = batch_rois.shape[0]
roi_center = batch_rois[:, :, 0:3]
pooled_features[:, :, :, 0:3] -= roi_center.unsqueeze(dim=2)
for k in range(batch_size):
pooled_features[k, :, :, 0:3] = kitti_utils.rotate_pc_along_y_torch(pooled_features[k, :, :, 0:3],
batch_rois[k, :, 6])
pts_input = pooled_features.view(-1, pooled_features.shape[2], pooled_features.shape[3])
else:
pts_input = input_data['pts_input'].view(-1,512,133)
target_dict = {}
target_dict['pts_input'] = input_data['pts_input'].view(-1,512,133)
target_dict['roi_boxes3d'] = input_data['roi_boxes3d'].view(-1,7)
if self.training:
target_dict['cls_label'] = input_data['cls_label'].view(-1)
target_dict['reg_valid_mask'] = input_data['reg_valid_mask'].view(-1)
target_dict['gt_of_rois'] = input_data['gt_boxes3d_ct'].view(-1,7)
xyz, features = self._break_up_pc(pts_input)
if cfg.RCNN.USE_RPN_FEATURES:
xyz_input = pts_input[..., 0:self.rcnn_input_channel].transpose(1, 2).unsqueeze(dim=3)
xyz_feature = self.xyz_up_layer(xyz_input)
rpn_feature = pts_input[..., self.rcnn_input_channel:].transpose(1, 2).unsqueeze(dim=3)
merged_feature = torch.cat((xyz_feature, rpn_feature), dim=1)
merged_feature = self.merge_down_layer(merged_feature)
l_xyz, l_features = [xyz], [merged_feature.squeeze(dim=3)]
else:
l_xyz, l_features = [xyz], [features]
# print(xyz)
# print(xyz.shape)
# print(l_xyz[-1].shape, l_features[-1].shape)
# input_features = torch.cat((l_xyz[-1], l_features[-1].transpose(1,2).contiguous()), dim=2)
# print(l_features[-1].shape)
pt_features = self.backbone(l_features[-1].unsqueeze(3))
features = torch.max(pt_features, dim=2)[0]
# print(features.shape)
rcnn_cls = self.cls_layer(features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
rcnn_reg = self.reg_layer(features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
ret_dict = {'rcnn_cls': rcnn_cls, 'rcnn_reg': rcnn_reg}
if self.training:
ret_dict.update(target_dict)
return ret_dict
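# knn computes k-nearest-neighbour indices from pairwise squared distances using
# the identity ||x_i - x_j||^2 = ||x_i||^2 + ||x_j||^2 - 2 x_i . x_j; the code
# negates it so topk returns the k *nearest* points.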
def knn(x, k):
inner = -2*torch.matmul(x.transpose(2, 1), x)
xx = torch.sum(x**2, dim=1, keepdim=True)
pairwise_distance = -xx - inner - xx.transpose(2, 1)
idx = pairwise_distance.topk(k=k, dim=-1)[1] # (batch_size, num_points, k)
return idx
def get_graph_feature(x, k=20, idx=None):
batch_size, num_dims, num_points = x.size()
    device = x.device  # follow the input tensor's device instead of hard-coding CUDA
x = x.view(batch_size, -1, num_points)
if idx is None:
idx = knn(x, k=k) # (batch_size, num_points, k)
idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1)*num_points
idx = idx + idx_base
idx = idx.view(-1)
    x = x.transpose(2, 1).contiguous()  # (batch_size, num_points, num_dims); flattened next so the offset idx can gather neighbours
feature = x.view(batch_size*num_points, -1)
feature = feature[idx, :]
feature = feature.view(batch_size, num_points, k, num_dims)
x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
feature = torch.cat((feature-x, x), dim=3).permute(0, 3, 1, 2)
# print(feature.shape)
return feature
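# get_graph_feature builds EdgeConv-style edge features: for every point it gathers
# its k neighbours and concatenates (neighbour - centre, centre), returning a
# (batch_size, 2 * num_dims, num_points, k) tensor.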
def get_graph_feature_spatial(xyz, feature, k=20, idx=None):
batch_size, num_dims, num_points = feature.size()
    device = feature.device  # follow the input tensor's device instead of hard-coding CUDA
xyz = xyz.view(batch_size, -1, num_points)
if idx is None:
idx = knn(xyz, k=k) # (batch_size, num_points, k)
idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1)*num_points
idx = idx + idx_base
idx = idx.view(-1)
feature = feature.view(batch_size, -1, num_points)
    x = feature.transpose(2, 1).contiguous()  # (batch_size, num_points, num_dims); flattened next so the offset idx can gather neighbours
feature = x.view(batch_size*num_points, -1)
feature = feature[idx, :]
feature = feature.view(batch_size, num_points, k, num_dims)
x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
feature = torch.cat((feature-x, x), dim=3).permute(0, 3, 1, 2)
# print(feature.shape)
return feature
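# get_graph_feature_spatial is the same construction, except the k-NN graph is
# built from xyz coordinates while the edge features are taken from `feature`,
# so neighbourhoods stay spatial even in deep feature space.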
def batch_process(input, fun, num_batches=5):
    # Apply `fun` to `input` in chunks along the leading dimension and
    # concatenate the results, bounding peak memory use.
    num_data = input.shape[0]
    data_per_batch = np.ceil(num_data / num_batches).astype(int)
    for i in range(num_batches):
        start = data_per_batch * i
        end = min(data_per_batch * (i + 1), num_data)
        if start >= num_data:  # fewer data than batches: nothing left to process
            break
        if i == 0:
            out = fun(input[:end])
        else:
            out = torch.cat((out, fun(input[start:end])), dim=0)
    # print(out.shape)
    return out
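# Usage sketch (hypothetical): out = batch_process(points, model, num_batches=4)
# runs `model` over ~equal chunks of the leading dimension to bound peak memory,
# at the cost of num_batches sequential forward passes.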
class GCNLayer(nn.Module):
def __init__(self,
in_channels,
out_channels,
use_norm=True,
last_layer=False):
"""
Pillar Feature Net Layer.
The Pillar Feature Net could be composed of a series of these layers, but the PointPillars paper results only
used a single PFNLayer. This layer performs a similar role as second.pytorch.voxelnet.VFELayer.
:param in_channels: <int>. Number of input channels.
:param out_channels: <int>. Number of output channels.
:param use_norm: <bool>. Whether to include BatchNorm.
:param last_layer: <bool>. If last_layer, there is no concatenation of features.
"""
super().__init__()
self.name = 'GCNLayer'
self.units = out_channels
self.in_channels = in_channels*2
self.last_layer = last_layer
if use_norm:
BatchNorm2d = nn.BatchNorm2d(self.units,
eps=1e-3, momentum=0.01)
Conv2d = nn.Conv2d(self.in_channels, self.units, kernel_size=1, bias=False)
else:
BatchNorm2d = nn.Identity()
Conv2d = nn.Conv2d(self.in_channels, self.units, kernel_size=1, bias=True)
self.seq = nn.Sequential(Conv2d,
BatchNorm2d,
nn.LeakyReLU(negative_slope=0.2))
self.k = 8
def forward(self, inputs, xyz=None):
# x = get_graph_feature(inputs.transpose(1,2).contiguous(), k=self.k)
x = get_graph_feature_spatial(xyz, inputs.transpose(1,2).contiguous(), k=self.k)
# print(x.shape)
x = self.seq(x)
# print(x.shape)
x = x.max(dim=-1, keepdim=False)[0].transpose(1,2).contiguous()
# print(x.shape)
if self.last_layer:
x = x.max(dim=1, keepdim=True)[0].transpose(1,2).contiguous()
return x
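# Shape sketch for GCNLayer (a reading of the code above): inputs (B, N, C) ->
# edge features (B, 2C, N, k) -> conv/BN/LeakyReLU (B, units, N, k) -> max over
# neighbours -> (B, N, units); with last_layer=True an extra max over points
# yields (B, units, 1).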
class GCNNet(nn.Module):
def __init__(self, num_classes, input_channels=0, use_xyz=True):
super().__init__()
self.gcn_layers = nn.ModuleList()
channel_in = input_channels
if cfg.RCNN.USE_RPN_FEATURES:
self.rcnn_input_channel = 3 + int(cfg.RCNN.USE_INTENSITY) + int(cfg.RCNN.USE_MASK) + int(cfg.RCNN.USE_DEPTH)
self.xyz_up_layer = pt_utils.SharedMLP([self.rcnn_input_channel] + cfg.RCNN.XYZ_UP_LAYER,
bn=cfg.RCNN.USE_BN)
c_out = cfg.RCNN.XYZ_UP_LAYER[-1]
self.merge_down_layer = pt_utils.SharedMLP([c_out * 2, c_out], bn=cfg.RCNN.USE_BN)
for k in range(cfg.RCNN.GCN_CONFIG.FILTERS.__len__()):
in_channels = 3 if k==0 else cfg.RCNN.GCN_CONFIG.FILTERS[k-1]
self.gcn_layers.append(
GCNLayer(
in_channels=in_channels,
out_channels=cfg.RCNN.GCN_CONFIG.FILTERS[k],
use_norm=cfg.RCNN.USE_BN,
last_layer=k==cfg.RCNN.GCN_CONFIG.FILTERS.__len__()-1
)
)
# classification layer
cls_channel = 1 if num_classes == 2 else num_classes
cls_layers = []
pre_channel = channel_in
for k in range(0, cfg.RCNN.CLS_FC.__len__()):
cls_layers.append(pt_utils.Conv1d(pre_channel, cfg.RCNN.CLS_FC[k], bn=cfg.RCNN.USE_BN))
pre_channel = cfg.RCNN.CLS_FC[k]
cls_layers.append(pt_utils.Conv1d(pre_channel, cls_channel, activation=None))
if cfg.RCNN.DP_RATIO >= 0:
cls_layers.insert(1, nn.Dropout(cfg.RCNN.DP_RATIO))
self.cls_layer = nn.Sequential(*cls_layers)
if cfg.RCNN.LOSS_CLS == 'SigmoidFocalLoss':
self.cls_loss_func = loss_utils.SigmoidFocalClassificationLoss(alpha=cfg.RCNN.FOCAL_ALPHA[0],
gamma=cfg.RCNN.FOCAL_GAMMA)
elif cfg.RCNN.LOSS_CLS == 'BinaryCrossEntropy':
self.cls_loss_func = F.binary_cross_entropy
elif cfg.RCNN.LOSS_CLS == 'CrossEntropy':
cls_weight = torch.from_numpy(cfg.RCNN.CLS_WEIGHT).float()
            self.cls_loss_func = nn.CrossEntropyLoss(ignore_index=-1, reduction='none', weight=cls_weight)
else:
raise NotImplementedError
# regression layer
per_loc_bin_num = int(cfg.RCNN.LOC_SCOPE / cfg.RCNN.LOC_BIN_SIZE) * 2
loc_y_bin_num = int(cfg.RCNN.LOC_Y_SCOPE / cfg.RCNN.LOC_Y_BIN_SIZE) * 2
reg_channel = per_loc_bin_num * 4 + cfg.RCNN.NUM_HEAD_BIN * 2 + 3
reg_channel += (1 if not cfg.RCNN.LOC_Y_BY_BIN else loc_y_bin_num * 2)
reg_layers = []
pre_channel = channel_in
for k in range(0, cfg.RCNN.REG_FC.__len__()):
reg_layers.append(pt_utils.Conv1d(pre_channel, cfg.RCNN.REG_FC[k], bn=cfg.RCNN.USE_BN))
pre_channel = cfg.RCNN.REG_FC[k]
reg_layers.append(pt_utils.Conv1d(pre_channel, reg_channel, activation=None))
if cfg.RCNN.DP_RATIO >= 0:
reg_layers.insert(1, nn.Dropout(cfg.RCNN.DP_RATIO))
self.reg_layer = nn.Sequential(*reg_layers)
self.proposal_target_layer = ProposalTargetLayer()
self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.reg_layer[-1].conv.weight, mean=0, std=0.001)
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
def forward(self, input_data):
"""
:param input_data: input dict
:return:
"""
if cfg.RCNN.ROI_SAMPLE_JIT:
if self.training:
with torch.no_grad():
target_dict = self.proposal_target_layer(input_data)
pts_input = torch.cat((target_dict['sampled_pts'], target_dict['pts_feature']), dim=2)
target_dict['pts_input'] = pts_input
else:
rpn_xyz, rpn_features = input_data['rpn_xyz'], input_data['rpn_features']
batch_rois = input_data['roi_boxes3d']
if cfg.RCNN.USE_INTENSITY:
pts_extra_input_list = [input_data['rpn_intensity'].unsqueeze(dim=2),
input_data['seg_mask'].unsqueeze(dim=2)]
else:
pts_extra_input_list = [input_data['seg_mask'].unsqueeze(dim=2)]
if cfg.RCNN.USE_DEPTH:
pts_depth = input_data['pts_depth'] / 70.0 - 0.5
pts_extra_input_list.append(pts_depth.unsqueeze(dim=2))
pts_extra_input = torch.cat(pts_extra_input_list, dim=2)
pts_feature = torch.cat((pts_extra_input, rpn_features), dim=2)
pooled_features, pooled_empty_flag = \
roipool3d_utils.roipool3d_gpu(rpn_xyz, pts_feature, batch_rois, cfg.RCNN.POOL_EXTRA_WIDTH,
sampled_pt_num=cfg.RCNN.NUM_POINTS)
# canonical transformation
batch_size = batch_rois.shape[0]
roi_center = batch_rois[:, :, 0:3]
pooled_features[:, :, :, 0:3] -= roi_center.unsqueeze(dim=2)
for k in range(batch_size):
pooled_features[k, :, :, 0:3] = kitti_utils.rotate_pc_along_y_torch(pooled_features[k, :, :, 0:3],
batch_rois[k, :, 6])
pts_input = pooled_features.view(-1, pooled_features.shape[2], pooled_features.shape[3])
else:
pts_input = input_data['pts_input']
target_dict = {}
target_dict['pts_input'] = input_data['pts_input']
target_dict['roi_boxes3d'] = input_data['roi_boxes3d']
if self.training:
target_dict['cls_label'] = input_data['cls_label']
target_dict['reg_valid_mask'] = input_data['reg_valid_mask']
target_dict['gt_of_rois'] = input_data['gt_boxes3d_ct']
xyz, features = self._break_up_pc(pts_input)
if cfg.RCNN.USE_RPN_FEATURES:
xyz_input = pts_input[..., 0:self.rcnn_input_channel].transpose(1, 2).unsqueeze(dim=3)
xyz_feature = self.xyz_up_layer(xyz_input)
rpn_feature = pts_input[..., self.rcnn_input_channel:].transpose(1, 2).unsqueeze(dim=3)
merged_feature = torch.cat((xyz_feature, rpn_feature), dim=1)
merged_feature = self.merge_down_layer(merged_feature)
l_xyz, l_features = [xyz], [merged_feature.squeeze(dim=3)]
else:
l_xyz, l_features = [xyz], [features]
for i in range(len(self.gcn_layers)):
if i == 0:
li_features = self.gcn_layers[i](l_xyz[i], xyz=l_xyz[0])
else:
li_features = self.gcn_layers[i](l_features[i], xyz=l_xyz[0])
# print(li_features.shape)
l_features.append(li_features)
# print(l_features[-1].shape, rpn_feature.shape)
        # NOTE: assumes cfg.RCNN.USE_RPN_FEATURES is enabled; `rpn_feature` is only
        # defined in that branch above. The per-ROI GCN feature is concatenated with
        # the max-pooled RPN feature before the classification/regression heads.
        rcnn_cls = self.cls_layer(torch.cat((l_features[-1], rpn_feature.max(dim=2)[0]), dim=1)).transpose(1, 2).contiguous().squeeze(dim=1)  # (B, 1 or 2)
        rcnn_reg = self.reg_layer(torch.cat((l_features[-1], rpn_feature.max(dim=2)[0]), dim=1)).transpose(1, 2).contiguous().squeeze(dim=1)  # (B, C)
ret_dict = {'rcnn_cls': rcnn_cls, 'rcnn_reg': rcnn_reg}
if self.training:
ret_dict.update(target_dict)
return ret_dict
def get_num_rot(degree_res):
return int(np.ceil(360/degree_res))
def create_rot_mat(degree_res):
angles = np.radians([(i * degree_res) for i in range(get_num_rot(degree_res))])
# print(angles, angles.shape)
cosas = np.cos(angles)
sinas = np.sin(angles)
# x_rot = (x - cx) * cosa + (z - cz) * (-sina);
# z_rot = (x - cx) * sina + (z - cz) * cosa;
# print(zip(cosas, sinas))
rot_mat = [[[cosa, 0, -sina],
[ 0, 1, 0],
[sina, 0, cosa]]
for cosa, sina in zip(cosas, sinas)]
# print(rot_mat[1])
return rot_mat
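# Illustrative check (an addition, not in the original file): with degree_res=90
# this yields 4 rotation matrices about the y-axis, and the second one maps the
# x-axis onto the z-axis:
#   mats = create_rot_mat(90)
#   np.array(mats[1]) @ np.array([1.0, 0.0, 0.0])  # ~ [0.0, 0.0, 1.0]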
class RotProjNet(nn.Module):
def __init__(self, degree_res):
super().__init__()
self.degree_res = degree_res
self.num_rot = get_num_rot(degree_res)
self.rot_mat = torch.tensor(create_rot_mat(self.degree_res)).float().cuda()
self.pixel_size = 0.0625 #pixel size in meters
self.im_size_meters = np.array([4,4]) #image size in meters.
self.im_size = (self.im_size_meters / self.pixel_size).astype(int)
# print(self.im_size)
def forward(self, xyz):
#PARAM xyz: (B, N, 3)
batch_size, num_pts, _ = xyz.shape
xyz_rot = xyz.repeat_interleave(self.num_rot, dim=0).contiguous().transpose(1,2).contiguous()
rot_mat = self.rot_mat.repeat(batch_size, 1, 1)
xyz_rot = torch.bmm(rot_mat, xyz_rot)
xyz_rot = xyz_rot.view(batch_size, self.num_rot, 3, num_pts).contiguous().transpose(2,3).contiguous() # (B, M, N, 3) with M different views
xyz_proj = xyz_rot[:, :, :, :2] + torch.tensor([self.im_size_meters[0] / 2, self.im_size_meters[1] / 2]).cuda() # (B, M, N, 2): shift x/y into the image frame (the original indexed [0] twice, which only works for square images)
# print(xyz_proj.shape)
xyz_proj = torch.round(xyz_proj/self.pixel_size).long()
# xyz_proj[:,:,:,1] = xyz_proj[:,:,:,1]
# print(xyz_proj.shape)
xyz_proj_mask = (xyz_proj[:,:,:,0] >= 0) & (xyz_proj[:,:,:,0] < self.im_size[0]) & (xyz_proj[:,:,:,1] >= 0) & (xyz_proj[:,:,:,1] < self.im_size[1])
# print(xyz_proj_mask.shape)
xyz_proj = xyz_proj * xyz_proj_mask.unsqueeze(-1).long()
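# note: multiplying by the mask sends out-of-range points to index (0, 0), so
# they still mark that corner pixel below; scattering only the masked-in points
# would avoid this artifact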
# print(xyz_proj)
image = torch.zeros(batch_size, self.num_rot, self.im_size[0], self.im_size[1]).cuda()
# print(xyz_proj[:,:,:,0].shape)
batch = np.arange(batch_size)
rots = np.arange(self.num_rot)
pts = np.arange(num_pts)
B, M, N = np.meshgrid(batch, rots, pts)
B = B.flatten()
M = M.flatten()
N = N.flatten()
# for b in range(batch_size):
# for m in range(self.num_rot):
# for n in range(num_pts):
# image[b,m, xyz_proj[b,m,n,0], xyz_proj[b,m,n,1]] = 1 #occupied voxelization
# print(B.shape)
if cfg.RCNN.ROT_CONFIG.OCCUPANCY:
image[B,M,xyz_proj[B,M,N,1], xyz_proj[B,M,N,0]] = 1 # occupied voxelization
else:
image[B,M,xyz_proj[B,M,N,1], xyz_proj[B,M,N,0]] = xyz_rot[B,M,N,2]/10 # distance voxelization
# for i in range(self.num_rot):
# f = open("views/image"+str(i)+".png", 'wb') # binary mode is important
# w = png.Writer(64, 64, greyscale=True)
# w.write(f,image[0,i].detach().cpu().numpy().astype(np.uint8)*255)
# f.close()
# print(image)
return image
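# Minimal usage sketch (illustrative, assumes a CUDA device since the module
# allocates its tensors with .cuda()):
#   net = RotProjNet(degree_res=90)                 # 4 rotated views
#   pts = torch.rand(2, 128, 3).cuda() * 4.0 - 2.0  # (B, N, 3) points in [-2, 2] m
#   views = net(pts)                                # (2, 4, 64, 64): 4 m / 0.0625 m = 64 px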
class RotRefModule(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, use_bn, dropout):
super().__init__()
# layer_size = [1024, 512, 256, 128]
if use_bn:
batch_norm = nn.BatchNorm2d
else:
batch_norm = nn.Identity
bias = not use_bn
self.conv = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, bias=bias),
batch_norm(out_channels),
nn.Dropout(p=dropout),
nn.LeakyReLU(negative_slope=0.2)
)
def forward(self, img):
return self.conv(img)
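# Shape note (illustrative): RotRefModule(4, 32, kernel_size=3, stride=2,
# use_bn=True, dropout=0.1) maps a (B, 4, 64, 64) view stack to (B, 32, 31, 31),
# following floor((64 - 3) / 2) + 1 = 31 per spatial dimension.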
class RotRCNN(nn.Module):
def __init__(self, num_classes, input_channels=0, use_xyz=True):
super().__init__()
self.rot_net = RotProjNet(cfg.RCNN.ROT_CONFIG.DEGREE_RES)
self.ref_modules = nn.ModuleList()
channel_in = input_channels
for k in range(cfg.RCNN.ROT_CONFIG.NFILTERS.__len__()):
channel_out = cfg.RCNN.ROT_CONFIG.NFILTERS[k] if cfg.RCNN.ROT_CONFIG.NFILTERS[k] != -1 else None
kernel_size = cfg.RCNN.ROT_CONFIG.KERNEL_SIZE[k]
stride = cfg.RCNN.ROT_CONFIG.STRIDE[k]
self.ref_modules.append(
RotRefModule(
in_channels=channel_in,
out_channels=channel_out,
kernel_size=kernel_size,
stride=stride,
use_bn=cfg.RCNN.USE_BN,
dropout=cfg.RCNN.ROT_CONFIG.DROPOUT
)
)
channel_in = cfg.RCNN.ROT_CONFIG.NFILTERS[k]
# classification layer
cls_channel = 1 if num_classes == 2 else num_classes
cls_layers = []
pre_channel = channel_in * cfg.RCNN.ROT_CONFIG.CONV_FEAT_MULTIPLIER
for k in range(0, cfg.RCNN.CLS_FC.__len__()):
cls_layers.append(pt_utils.Conv1d(pre_channel, cfg.RCNN.CLS_FC[k], bn=cfg.RCNN.USE_BN))
pre_channel = cfg.RCNN.CLS_FC[k]
cls_layers.append(pt_utils.Conv1d(pre_channel, cls_channel, activation=None))
if cfg.RCNN.DP_RATIO >= 0:
cls_layers.insert(1, nn.Dropout(cfg.RCNN.DP_RATIO))
self.cls_layer = nn.Sequential(*cls_layers)
if cfg.RCNN.LOSS_CLS == 'SigmoidFocalLoss':
self.cls_loss_func = loss_utils.SigmoidFocalClassificationLoss(alpha=cfg.RCNN.FOCAL_ALPHA[0],
gamma=cfg.RCNN.FOCAL_GAMMA)
elif cfg.RCNN.LOSS_CLS == 'BinaryCrossEntropy':
self.cls_loss_func = F.binary_cross_entropy
elif cfg.RCNN.LOSS_CLS == 'CrossEntropy':
cls_weight = torch.from_numpy(cfg.RCNN.CLS_WEIGHT).float()
self.cls_loss_func = nn.CrossEntropyLoss(ignore_index=-1, reduce=False, weight=cls_weight)
else:
raise NotImplementedError
# regression layer
per_loc_bin_num = int(cfg.RCNN.LOC_SCOPE / cfg.RCNN.LOC_BIN_SIZE) * 2
loc_y_bin_num = int(cfg.RCNN.LOC_Y_SCOPE / cfg.RCNN.LOC_Y_BIN_SIZE) * 2
reg_channel = per_loc_bin_num * 4 + cfg.RCNN.NUM_HEAD_BIN * 2 + 3
reg_channel += (1 if not cfg.RCNN.LOC_Y_BY_BIN else loc_y_bin_num * 2)
reg_layers = []
pre_channel = channel_in * cfg.RCNN.ROT_CONFIG.CONV_FEAT_MULTIPLIER
for k in range(0, cfg.RCNN.REG_FC.__len__()):
reg_layers.append(pt_utils.Conv1d(pre_channel, cfg.RCNN.REG_FC[k], bn=cfg.RCNN.USE_BN))
pre_channel = cfg.RCNN.REG_FC[k]
reg_layers.append(pt_utils.Conv1d(pre_channel, reg_channel, activation=None))
if cfg.RCNN.DP_RATIO >= 0:
reg_layers.insert(1, nn.Dropout(cfg.RCNN.DP_RATIO))
self.reg_layer = nn.Sequential(*reg_layers)
self.proposal_target_layer = ProposalTargetLayer()
self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.reg_layer[-1].conv.weight, mean=0, std=0.001)
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
def forward(self, input_data):
"""
:param input_data: input dict
:return:
"""
if cfg.RCNN.ROI_SAMPLE_JIT:
if self.training:
with torch.no_grad():
target_dict = self.proposal_target_layer(input_data)
pts_input = torch.cat((target_dict['sampled_pts'], target_dict['pts_feature']), dim=2)
target_dict['pts_input'] = pts_input
else:
rpn_xyz, rpn_features = input_data['rpn_xyz'], input_data['rpn_features']
batch_rois = input_data['roi_boxes3d']
if cfg.RCNN.USE_INTENSITY:
pts_extra_input_list = [input_data['rpn_intensity'].unsqueeze(dim=2),
input_data['seg_mask'].unsqueeze(dim=2)]
else:
pts_extra_input_list = [input_data['seg_mask'].unsqueeze(dim=2)]
if cfg.RCNN.USE_DEPTH:
pts_depth = input_data['pts_depth'] / 70.0 - 0.5
pts_extra_input_list.append(pts_depth.unsqueeze(dim=2))
pts_extra_input = torch.cat(pts_extra_input_list, dim=2)
pts_feature = torch.cat((pts_extra_input, rpn_features), dim=2)
pooled_features, pooled_empty_flag = \
roipool3d_utils.roipool3d_gpu(rpn_xyz, pts_feature, batch_rois, cfg.RCNN.POOL_EXTRA_WIDTH,
sampled_pt_num=cfg.RCNN.NUM_POINTS)
# canonical transformation
batch_size = batch_rois.shape[0]
roi_center = batch_rois[:, :, 0:3]
pooled_features[:, :, :, 0:3] -= roi_center.unsqueeze(dim=2)
for k in range(batch_size):
pooled_features[k, :, :, 0:3] = kitti_utils.rotate_pc_along_y_torch(pooled_features[k, :, :, 0:3],
batch_rois[k, :, 6])
pts_input = pooled_features.view(-1, pooled_features.shape[2], pooled_features.shape[3])
else:
pts_input = input_data['pts_input']
target_dict = {}
target_dict['pts_input'] = input_data['pts_input']
target_dict['roi_boxes3d'] = input_data['roi_boxes3d']
if self.training:
target_dict['cls_label'] = input_data['cls_label']
target_dict['reg_valid_mask'] = input_data['reg_valid_mask']
target_dict['gt_of_rois'] = input_data['gt_boxes3d_ct']
xyz, features = self._break_up_pc(pts_input)
# print(xyz)
l_features = [self.rot_net(xyz)]
# print(l_features)
for layer in self.ref_modules:
l_features.append(layer(l_features[-1]))
# print(l_features.shape)
features = l_features[-1].view(l_features[-1].shape[0], -1).unsqueeze(2)
# print(features.shape)
rcnn_cls = self.cls_layer(features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
rcnn_reg = self.reg_layer(features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
ret_dict = {'rcnn_cls': rcnn_cls, 'rcnn_reg': rcnn_reg}
if self.training:
ret_dict.update(target_dict)
return ret_dict
class RCNNNet(nn.Module):
def __init__(self, num_classes, input_channels=0, use_xyz=True):
super().__init__()
self.SA_modules = nn.ModuleList()
channel_in = input_channels
if cfg.RCNN.USE_RPN_FEATURES:
self.rcnn_input_channel = 3 + int(cfg.RCNN.USE_INTENSITY) + int(cfg.RCNN.USE_MASK) + int(cfg.RCNN.USE_DEPTH)
self.xyz_up_layer = pt_utils.SharedMLP([self.rcnn_input_channel] + cfg.RCNN.XYZ_UP_LAYER,
bn=cfg.RCNN.USE_BN)
c_out = cfg.RCNN.XYZ_UP_LAYER[-1]
self.merge_down_layer = pt_utils.SharedMLP([c_out * 2, c_out], bn=cfg.RCNN.USE_BN)
for k in range(cfg.RCNN.SA_CONFIG.NPOINTS.__len__()):
mlps = [channel_in] + cfg.RCNN.SA_CONFIG.MLPS[k]
npoint = cfg.RCNN.SA_CONFIG.NPOINTS[k] if cfg.RCNN.SA_CONFIG.NPOINTS[k] != -1 else None
self.SA_modules.append(
PointnetSAModule(
npoint=npoint,
radius=cfg.RCNN.SA_CONFIG.RADIUS[k],
nsample=cfg.RCNN.SA_CONFIG.NSAMPLE[k],
mlp=mlps,
use_xyz=use_xyz,
bn=cfg.RCNN.USE_BN
)
)
channel_in = mlps[-1]
# classification layer
cls_channel = 1 if num_classes == 2 else num_classes
cls_layers = []
pre_channel = channel_in
for k in range(0, cfg.RCNN.CLS_FC.__len__()):
cls_layers.append(pt_utils.Conv1d(pre_channel, cfg.RCNN.CLS_FC[k], bn=cfg.RCNN.USE_BN))
pre_channel = cfg.RCNN.CLS_FC[k]
cls_layers.append(pt_utils.Conv1d(pre_channel, cls_channel, activation=None))
if cfg.RCNN.DP_RATIO >= 0:
cls_layers.insert(1, nn.Dropout(cfg.RCNN.DP_RATIO))
self.cls_layer = nn.Sequential(*cls_layers)
if cfg.RCNN.LOSS_CLS == 'SigmoidFocalLoss':
self.cls_loss_func = loss_utils.SigmoidFocalClassificationLoss(alpha=cfg.RCNN.FOCAL_ALPHA[0],
gamma=cfg.RCNN.FOCAL_GAMMA)
elif cfg.RCNN.LOSS_CLS == 'BinaryCrossEntropy':
self.cls_loss_func = F.binary_cross_entropy
elif cfg.RCNN.LOSS_CLS == 'CrossEntropy':
cls_weight = torch.from_numpy(cfg.RCNN.CLS_WEIGHT).float()
self.cls_loss_func = nn.CrossEntropyLoss(ignore_index=-1, reduce=False, weight=cls_weight)
else:
raise NotImplementedError
# regression layer
per_loc_bin_num = int(cfg.RCNN.LOC_SCOPE / cfg.RCNN.LOC_BIN_SIZE) * 2
loc_y_bin_num = int(cfg.RCNN.LOC_Y_SCOPE / cfg.RCNN.LOC_Y_BIN_SIZE) * 2
reg_channel = per_loc_bin_num * 4 + cfg.RCNN.NUM_HEAD_BIN * 2 + 3
reg_channel += (1 if not cfg.RCNN.LOC_Y_BY_BIN else loc_y_bin_num * 2)
reg_layers = []
pre_channel = channel_in
for k in range(0, cfg.RCNN.REG_FC.__len__()):
reg_layers.append(pt_utils.Conv1d(pre_channel, cfg.RCNN.REG_FC[k], bn=cfg.RCNN.USE_BN))
pre_channel = cfg.RCNN.REG_FC[k]
reg_layers.append(pt_utils.Conv1d(pre_channel, reg_channel, activation=None))
if cfg.RCNN.DP_RATIO >= 0:
reg_layers.insert(1, nn.Dropout(cfg.RCNN.DP_RATIO))
self.reg_layer = nn.Sequential(*reg_layers)
self.proposal_target_layer = ProposalTargetLayer()
self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.reg_layer[-1].conv.weight, mean=0, std=0.001)
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
def forward(self, input_data):
"""
:param input_data: input dict
:return:
"""
if cfg.RCNN.ROI_SAMPLE_JIT:
if self.training:
with torch.no_grad():
target_dict = self.proposal_target_layer(input_data)
pts_input = torch.cat((target_dict['sampled_pts'], target_dict['pts_feature']), dim=2)
target_dict['pts_input'] = pts_input
else:
rpn_xyz, rpn_features = input_data['rpn_xyz'], input_data['rpn_features']
batch_rois = input_data['roi_boxes3d']
if cfg.RCNN.USE_INTENSITY:
pts_extra_input_list = [input_data['rpn_intensity'].unsqueeze(dim=2),
input_data['seg_mask'].unsqueeze(dim=2)]
else:
pts_extra_input_list = [input_data['seg_mask'].unsqueeze(dim=2)]
if cfg.RCNN.USE_DEPTH:
pts_depth = input_data['pts_depth'] / 70.0 - 0.5
pts_extra_input_list.append(pts_depth.unsqueeze(dim=2))
pts_extra_input = torch.cat(pts_extra_input_list, dim=2)
pts_feature = torch.cat((pts_extra_input, rpn_features), dim=2)
pooled_features, pooled_empty_flag = \
roipool3d_utils.roipool3d_gpu(rpn_xyz, pts_feature, batch_rois, cfg.RCNN.POOL_EXTRA_WIDTH,
sampled_pt_num=cfg.RCNN.NUM_POINTS)
# canonical transformation
batch_size = batch_rois.shape[0]
roi_center = batch_rois[:, :, 0:3]
pooled_features[:, :, :, 0:3] -= roi_center.unsqueeze(dim=2)
for k in range(batch_size):
pooled_features[k, :, :, 0:3] = kitti_utils.rotate_pc_along_y_torch(pooled_features[k, :, :, 0:3],
batch_rois[k, :, 6])
pts_input = pooled_features.view(-1, pooled_features.shape[2], pooled_features.shape[3])
else:
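# assumed layout: each ROI contributes 512 sampled points with 133 channels
# (xyz plus extra inputs plus RPN features); the hard-coded view sizes below
# rely on that assumption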
pts_input = input_data['pts_input'].view(-1,512,133)
target_dict = {}
target_dict['pts_input'] = input_data['pts_input'].view(-1,512,133)
target_dict['roi_boxes3d'] = input_data['roi_boxes3d'].view(-1,7)
if self.training:
target_dict['cls_label'] = input_data['cls_label'].view(-1)
target_dict['reg_valid_mask'] = input_data['reg_valid_mask'].view(-1)
target_dict['gt_of_rois'] = input_data['gt_boxes3d_ct'].view(-1,7)
xyz, features = self._break_up_pc(pts_input)
if cfg.RCNN.USE_RPN_FEATURES:
xyz_input = pts_input[..., 0:self.rcnn_input_channel].transpose(1, 2).unsqueeze(dim=3)
xyz_feature = self.xyz_up_layer(xyz_input)
rpn_feature = pts_input[..., self.rcnn_input_channel:].transpose(1, 2).unsqueeze(dim=3)
merged_feature = torch.cat((xyz_feature, rpn_feature), dim=1)
merged_feature = self.merge_down_layer(merged_feature)
l_xyz, l_features = [xyz], [merged_feature.squeeze(dim=3)]
else:
l_xyz, l_features = [xyz], [features]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
# print(l_features[-1].shape)
rcnn_cls = self.cls_layer(l_features[-1]).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
rcnn_reg = self.reg_layer(l_features[-1]).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
# print(rcnn_cls.shape)
ret_dict = {'rcnn_cls': rcnn_cls, 'rcnn_reg': rcnn_reg}
if self.training:
ret_dict.update(target_dict)
return ret_dict
class RefineRCNNNet(nn.Module):
def __init__(self, num_classes, input_channels=0, use_xyz=True):
super().__init__()
self.SA_modules = nn.ModuleList()
channel_in = input_channels
opt = DenseOpts()
opt.head = False
opt.in_channels = 512 + 7 * cfg.RCNN.REF_CONFIG.USE_PROPOSALS + 128 * cfg.RCNN.REF_CONFIG.USE_RPN_FEATS
# opt.constant_dilation = True
opt.n_blocks = cfg.RCNN.REF_CONFIG.N_BLOCKS
opt.kernel_size = cfg.RCNN.REF_CONFIG.KERNEL_SIZE
opt.n_filters = cfg.RCNN.REF_CONFIG.N_FILTERS
opt.conv = cfg.RCNN.REF_CONFIG.CONV
opt.constant_dilation = cfg.RCNN.REF_CONFIG.CONSTANT_DILATION
opt.linear_dilation = cfg.RCNN.REF_CONFIG.LINEAR_DILATION
self.refine = DenseDeepGCN(opt)
if cfg.RCNN.USE_RPN_FEATURES:
self.rcnn_input_channel = 3 + int(cfg.RCNN.USE_INTENSITY) + int(cfg.RCNN.USE_MASK) + int(cfg.RCNN.USE_DEPTH)
self.xyz_up_layer = pt_utils.SharedMLP([self.rcnn_input_channel] + cfg.RCNN.XYZ_UP_LAYER,
bn=cfg.RCNN.USE_BN)
c_out = cfg.RCNN.XYZ_UP_LAYER[-1]
self.merge_down_layer = pt_utils.SharedMLP([c_out * 2, c_out], bn=cfg.RCNN.USE_BN)
for k in range(cfg.RCNN.SA_CONFIG.NPOINTS.__len__()):
mlps = [channel_in] + cfg.RCNN.SA_CONFIG.MLPS[k]
npoint = cfg.RCNN.SA_CONFIG.NPOINTS[k] if cfg.RCNN.SA_CONFIG.NPOINTS[k] != -1 else None
self.SA_modules.append(
PointnetSAModule(
npoint=npoint,
radius=cfg.RCNN.SA_CONFIG.RADIUS[k],
nsample=cfg.RCNN.SA_CONFIG.NSAMPLE[k],
mlp=mlps,
use_xyz=use_xyz,
bn=cfg.RCNN.USE_BN
)
)
channel_in = mlps[-1]
# classification layer
cls_channel = 1 if num_classes == 2 else num_classes
cls_layers = []
channel_in = self.refine.channel_out
if cfg.RCNN.REF_CONFIG.USE_RCNN_FEATS:
channel_in += opt.in_channels
pre_channel = channel_in
for k in range(0, cfg.RCNN.CLS_FC.__len__()):
cls_layers.append(pt_utils.Conv1d(pre_channel, cfg.RCNN.CLS_FC[k], bn=cfg.RCNN.USE_BN))
pre_channel = cfg.RCNN.CLS_FC[k]
cls_layers.append(pt_utils.Conv1d(pre_channel, cls_channel, activation=None))
if cfg.RCNN.DP_RATIO >= 0:
cls_layers.insert(1, nn.Dropout(cfg.RCNN.DP_RATIO))
self.cls_layer = nn.Sequential(*cls_layers)
if cfg.RCNN.LOSS_CLS == 'SigmoidFocalLoss':
self.cls_loss_func = loss_utils.SigmoidFocalClassificationLoss(alpha=cfg.RCNN.FOCAL_ALPHA[0],
gamma=cfg.RCNN.FOCAL_GAMMA)
elif cfg.RCNN.LOSS_CLS == 'BinaryCrossEntropy':
self.cls_loss_func = F.binary_cross_entropy
elif cfg.RCNN.LOSS_CLS == 'CrossEntropy':
cls_weight = torch.from_numpy(cfg.RCNN.CLS_WEIGHT).float()
self.cls_loss_func = nn.CrossEntropyLoss(ignore_index=-1, reduce=False, weight=cls_weight)
else:
raise NotImplementedError
# regression layer
per_loc_bin_num = int(cfg.RCNN.LOC_SCOPE / cfg.RCNN.LOC_BIN_SIZE) * 2
loc_y_bin_num = int(cfg.RCNN.LOC_Y_SCOPE / cfg.RCNN.LOC_Y_BIN_SIZE) * 2
reg_channel = per_loc_bin_num * 4 + cfg.RCNN.NUM_HEAD_BIN * 2 + 3
reg_channel += (1 if not cfg.RCNN.LOC_Y_BY_BIN else loc_y_bin_num * 2)
reg_layers = []
pre_channel = channel_in
for k in range(0, cfg.RCNN.REG_FC.__len__()):
reg_layers.append(pt_utils.Conv1d(pre_channel, cfg.RCNN.REG_FC[k], bn=cfg.RCNN.USE_BN))
pre_channel = cfg.RCNN.REG_FC[k]
reg_layers.append(pt_utils.Conv1d(pre_channel, reg_channel, activation=None))
if cfg.RCNN.DP_RATIO >= 0:
reg_layers.insert(1, nn.Dropout(cfg.RCNN.DP_RATIO))
self.reg_layer = nn.Sequential(*reg_layers)
self.proposal_target_layer = ProposalTargetLayer()
self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.reg_layer[-1].conv.weight, mean=0, std=0.001)
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
def forward(self, input_data):
"""
:param input_data: input dict
:return:
"""
if cfg.RCNN.ROI_SAMPLE_JIT:
if self.training:
with torch.no_grad():
target_dict = self.proposal_target_layer(input_data)
pts_input = torch.cat((target_dict['sampled_pts'], target_dict['pts_feature']), dim=2)
target_dict['pts_input'] = pts_input
else:
rpn_xyz, rpn_features = input_data['rpn_xyz'], input_data['rpn_features']
batch_rois = input_data['roi_boxes3d']
if cfg.RCNN.USE_INTENSITY:
pts_extra_input_list = [input_data['rpn_intensity'].unsqueeze(dim=2),
input_data['seg_mask'].unsqueeze(dim=2)]
else:
pts_extra_input_list = [input_data['seg_mask'].unsqueeze(dim=2)]
if cfg.RCNN.USE_DEPTH:
pts_depth = input_data['pts_depth'] / 70.0 - 0.5
pts_extra_input_list.append(pts_depth.unsqueeze(dim=2))
pts_extra_input = torch.cat(pts_extra_input_list, dim=2)
pts_feature = torch.cat((pts_extra_input, rpn_features), dim=2)
pooled_features, pooled_empty_flag = \
roipool3d_utils.roipool3d_gpu(rpn_xyz, pts_feature, batch_rois, cfg.RCNN.POOL_EXTRA_WIDTH,
sampled_pt_num=cfg.RCNN.NUM_POINTS)
# canonical transformation
batch_size = batch_rois.shape[0]
roi_center = batch_rois[:, :, 0:3]
pooled_features[:, :, :, 0:3] -= roi_center.unsqueeze(dim=2)
for k in range(batch_size):
pooled_features[k, :, :, 0:3] = kitti_utils.rotate_pc_along_y_torch(pooled_features[k, :, :, 0:3],
batch_rois[k, :, 6])
pts_input = pooled_features.view(-1, pooled_features.shape[2], pooled_features.shape[3])
else:
pts_input = input_data['pts_input'].view(-1,512,133)
target_dict = {}
target_dict['pts_input'] = input_data['pts_input'].view(-1,512,133)
target_dict['roi_boxes3d'] = input_data['roi_boxes3d'].view(-1,7)
if self.training:
target_dict['cls_label'] = input_data['cls_label'].view(-1)
target_dict['reg_valid_mask'] = input_data['reg_valid_mask'].view(-1)
target_dict['gt_of_rois'] = input_data['gt_boxes3d_ct'].view(-1,7)
xyz, features = self._break_up_pc(pts_input)
if cfg.RCNN.USE_RPN_FEATURES:
xyz_input = pts_input[..., 0:self.rcnn_input_channel].transpose(1, 2).unsqueeze(dim=3)
xyz_feature = self.xyz_up_layer(xyz_input)
rpn_feature = pts_input[..., self.rcnn_input_channel:].transpose(1, 2).unsqueeze(dim=3)
merged_feature = torch.cat((xyz_feature, rpn_feature), dim=1)
merged_feature = self.merge_down_layer(merged_feature)
l_xyz, l_features = [xyz], [merged_feature.squeeze(dim=3)]
else:
l_xyz, l_features = [xyz], [features]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
# print(input_data.shape)
# print(l_features[-1].shape)
if self.training:
num_proposals = cfg.RCNN.ROI_PER_IMAGE
else:
num_proposals = cfg.TEST.RPN_POST_NMS_TOP_N
# print(l_features[-1].shape)
# print(l_features[-1].shape)
# print(proposals)
if cfg.BATCH_SIZE == 1:
features = l_features[-1].view(1,-1,l_features[-1].shape[1],1).contiguous().transpose(1,2).contiguous()
else:
features = l_features[-1].view(-1,num_proposals,l_features[-1].shape[1],1).contiguous().transpose(1,2).contiguous()
#features = l_features[-1].view(cfg.BATCH_SIZE,num_proposals,l_features[-1].shape[1],1).contiguous().transpose(1,2).contiguous()
if cfg.RCNN.REF_CONFIG.USE_PROPOSALS:
proposals = input_data['roi_boxes3d'].view(-1,7)
prop_feat = torch.zeros_like(proposals)
prop_feat[:,0] = proposals[:,0]/80 + 0.5
prop_feat[:,1] = proposals[:,1]/10 + 0.5
prop_feat[:,2] = proposals[:,2]/70
prop_feat[:,3] = proposals[:,3]/5
prop_feat[:,4] = proposals[:,4]/10
prop_feat[:,5] = proposals[:,5]/5
prop_feat[:,6] = proposals[:,6]/(2*np.pi) + 0.5
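# the divisors above look like rough KITTI scene extents (x within about
# +/-40 m, y within about +/-5 m, z up to 70 m, box dimensions of a few
# meters), squashing each proposal field into roughly [0, 1] before it is
# concatenated as an extra feature channel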
# print(features.shape, prop_feat.shape)
features = torch.cat((features, prop_feat.transpose(0,1).contiguous().unsqueeze(0).unsqueeze(-1)), dim=1)
if cfg.RCNN.REF_CONFIG.USE_RPN_FEATS:
rpn_feats = pts_input[..., self.rcnn_input_channel:].transpose(1, 2).unsqueeze(dim=3)
# print(rpn_feats.shape)
rpn_feats = torch.max(rpn_feats, dim=2)[0].transpose(0,1).contiguous().unsqueeze(0)
# print(features.shape, rpn_feats.shape)
features = torch.cat((features, rpn_feats), dim=1)
# print(features.shape)
# print(xyz.shape)
features = self.refine(features).transpose(1,2).contiguous().view(-1,self.refine.channel_out,1).contiguous()
# print(features.shape)
# if cfg.RCNN.REF_CONFIG.USE_RCNN_FEATS:
# #print(features.shape, l_features[-1].shape)
# features = torch.cat((features, l_features[-1]), dim=1)
# print(features.shape)
rcnn_cls = self.cls_layer(features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
rcnn_reg = self.reg_layer(features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
# print(rcnn_cls.shape)
ret_dict = {'rcnn_cls': rcnn_cls, 'rcnn_reg': rcnn_reg}
if self.training:
ret_dict.update(target_dict)
return ret_dict
class RefineDeepRCNNNet(nn.Module):
def __init__(self, num_classes, input_channels=0, use_xyz=True):
super().__init__()
opt = DenseOpts()
self.backbone = DenseDeepGCN(opt)
channel_in = input_channels
opt = DenseOpts()
opt.head = False
opt.in_channels = self.backbone.channel_out + 7 * cfg.RCNN.REF_CONFIG.USE_PROPOSALS + 128 * cfg.RCNN.REF_CONFIG.USE_RPN_FEATS
# opt.constant_dilation = True
opt.n_blocks = cfg.RCNN.REF_CONFIG.N_BLOCKS
opt.kernel_size = cfg.RCNN.REF_CONFIG.KERNEL_SIZE
opt.n_filters = cfg.RCNN.REF_CONFIG.N_FILTERS
opt.conv = cfg.RCNN.REF_CONFIG.CONV
opt.constant_dilation = cfg.RCNN.REF_CONFIG.CONSTANT_DILATION
opt.linear_dilation = cfg.RCNN.REF_CONFIG.LINEAR_DILATION
self.refine = DenseDeepGCN(opt)
# classification layer
cls_channel = 1 if num_classes == 2 else num_classes
cls_layers = []
channel_in = self.refine.channel_out
pre_channel = channel_in
for k in range(0, cfg.RCNN.CLS_FC.__len__()):
cls_layers.append(pt_utils.Conv1d(pre_channel, cfg.RCNN.CLS_FC[k], bn=cfg.RCNN.USE_BN))
pre_channel = cfg.RCNN.CLS_FC[k]
cls_layers.append(pt_utils.Conv1d(pre_channel, cls_channel, activation=None))
if cfg.RCNN.DP_RATIO >= 0:
cls_layers.insert(1, nn.Dropout(cfg.RCNN.DP_RATIO))
self.cls_layer = nn.Sequential(*cls_layers)
if cfg.RCNN.LOSS_CLS == 'SigmoidFocalLoss':
self.cls_loss_func = loss_utils.SigmoidFocalClassificationLoss(alpha=cfg.RCNN.FOCAL_ALPHA[0],
gamma=cfg.RCNN.FOCAL_GAMMA)
elif cfg.RCNN.LOSS_CLS == 'BinaryCrossEntropy':
self.cls_loss_func = F.binary_cross_entropy
elif cfg.RCNN.LOSS_CLS == 'CrossEntropy':
cls_weight = torch.from_numpy(cfg.RCNN.CLS_WEIGHT).float()
self.cls_loss_func = nn.CrossEntropyLoss(ignore_index=-1, reduce=False, weight=cls_weight)
else:
raise NotImplementedError
# regression layer
per_loc_bin_num = int(cfg.RCNN.LOC_SCOPE / cfg.RCNN.LOC_BIN_SIZE) * 2
loc_y_bin_num = int(cfg.RCNN.LOC_Y_SCOPE / cfg.RCNN.LOC_Y_BIN_SIZE) * 2
reg_channel = per_loc_bin_num * 4 + cfg.RCNN.NUM_HEAD_BIN * 2 + 3
reg_channel += (1 if not cfg.RCNN.LOC_Y_BY_BIN else loc_y_bin_num * 2)
reg_layers = []
pre_channel = channel_in
for k in range(0, cfg.RCNN.REG_FC.__len__()):
reg_layers.append(pt_utils.Conv1d(pre_channel, cfg.RCNN.REG_FC[k], bn=cfg.RCNN.USE_BN))
pre_channel = cfg.RCNN.REG_FC[k]
reg_layers.append(pt_utils.Conv1d(pre_channel, reg_channel, activation=None))
if cfg.RCNN.DP_RATIO >= 0:
reg_layers.insert(1, nn.Dropout(cfg.RCNN.DP_RATIO))
self.reg_layer = nn.Sequential(*reg_layers)
self.proposal_target_layer = ProposalTargetLayer()
self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.reg_layer[-1].conv.weight, mean=0, std=0.001)
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
def forward(self, input_data):
"""
:param input_data: input dict
:return:
"""
if cfg.RCNN.ROI_SAMPLE_JIT:
if self.training:
with torch.no_grad():
target_dict = self.proposal_target_layer(input_data)
pts_input = torch.cat((target_dict['sampled_pts'], target_dict['pts_feature']), dim=2)
target_dict['pts_input'] = pts_input
else:
rpn_xyz, rpn_features = input_data['rpn_xyz'], input_data['rpn_features']
batch_rois = input_data['roi_boxes3d']
if cfg.RCNN.USE_INTENSITY:
pts_extra_input_list = [input_data['rpn_intensity'].unsqueeze(dim=2),
input_data['seg_mask'].unsqueeze(dim=2)]
else:
pts_extra_input_list = [input_data['seg_mask'].unsqueeze(dim=2)]
if cfg.RCNN.USE_DEPTH:
pts_depth = input_data['pts_depth'] / 70.0 - 0.5
pts_extra_input_list.append(pts_depth.unsqueeze(dim=2))
pts_extra_input = torch.cat(pts_extra_input_list, dim=2)
pts_feature = torch.cat((pts_extra_input, rpn_features), dim=2)
pooled_features, pooled_empty_flag = \
roipool3d_utils.roipool3d_gpu(rpn_xyz, pts_feature, batch_rois, cfg.RCNN.POOL_EXTRA_WIDTH,
sampled_pt_num=cfg.RCNN.NUM_POINTS)
# canonical transformation
batch_size = batch_rois.shape[0]
roi_center = batch_rois[:, :, 0:3]
pooled_features[:, :, :, 0:3] -= roi_center.unsqueeze(dim=2)
for k in range(batch_size):
pooled_features[k, :, :, 0:3] = kitti_utils.rotate_pc_along_y_torch(pooled_features[k, :, :, 0:3],
batch_rois[k, :, 6])
pts_input = pooled_features.view(-1, pooled_features.shape[2], pooled_features.shape[3])
else:
pts_input = input_data['pts_input'].view(-1,512,133)
target_dict = {}
target_dict['pts_input'] = input_data['pts_input'].view(-1,512,133)
target_dict['roi_boxes3d'] = input_data['roi_boxes3d'].view(-1,7)
if self.training:
target_dict['cls_label'] = input_data['cls_label'].view(-1)
target_dict['reg_valid_mask'] = input_data['reg_valid_mask'].view(-1)
target_dict['gt_of_rois'] = input_data['gt_boxes3d_ct'].view(-1,7)
xyz, features = self._break_up_pc(pts_input)
pt_features = self.backbone(xyz.transpose(1,2).contiguous().unsqueeze(3))
features = torch.max(pt_features, dim=2)[0]
# print(input_data.shape)
# print(l_features[-1].shape)
if self.training:
num_proposals = cfg.RCNN.ROI_PER_IMAGE
else:
num_proposals = cfg.TEST.RPN_POST_NMS_TOP_N
if cfg.BATCH_SIZE == 1:
ref_features_prep = features.view(1,-1,features.shape[1],1).contiguous().transpose(1,2).contiguous()
else:
ref_features_prep = features.view(-1,num_proposals,features.shape[1],1).contiguous().transpose(1,2).contiguous()
#ref_features_prep = features.view(cfg.BATCH_SIZE,num_proposals,features.shape[1],1).contiguous().transpose(1,2).contiguous()
if cfg.RCNN.REF_CONFIG.USE_PROPOSALS:
proposals = input_data['roi_boxes3d'].view(-1,7)
prop_feat = torch.zeros_like(proposals)
prop_feat[:,0] = proposals[:,0]/80 + 0.5
prop_feat[:,1] = proposals[:,1]/10 + 0.5
prop_feat[:,2] = proposals[:,2]/70
prop_feat[:,3] = proposals[:,3]/5
prop_feat[:,4] = proposals[:,4]/10
prop_feat[:,5] = proposals[:,5]/5
prop_feat[:,6] = proposals[:,6]/(2*np.pi) + 0.5
# print(features.shape, prop_feat.shape)
ref_features_prep = torch.cat((ref_features_prep, prop_feat.transpose(0,1).contiguous().unsqueeze(0).unsqueeze(-1)), dim=1)
if cfg.RCNN.REF_CONFIG.USE_RPN_FEATS:
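# note: self.rcnn_input_channel is never assigned in this class, so this
# branch would raise AttributeError unless the attribute is set elsewhere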
rpn_feats = pts_input[..., self.rcnn_input_channel:].transpose(1, 2).unsqueeze(dim=3)
# print(rpn_feats.shape)
rpn_feats = torch.max(rpn_feats, dim=2)[0].transpose(0,1).contiguous().unsqueeze(0)
# print(features.shape, rpn_feats.shape)
ref_features_prep = torch.cat((ref_features_prep, rpn_feats), dim=1)
# print(features.shape)
# print(xyz.shape)
ref_features = self.refine(ref_features_prep).transpose(1,2).contiguous().view(-1,self.refine.channel_out,1).contiguous()
# if cfg.RCNN.REF_CONFIG.USE_RCNN_FEATS:
# #print(features.shape, l_features[-1].shape)
# ref_features = torch.cat((ref_features, l_features[-1]), dim=1)
# print(features.shape)
rcnn_cls = self.cls_layer(ref_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
rcnn_reg = self.reg_layer(ref_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
# print(rcnn_cls.shape)
ret_dict = {'rcnn_cls': rcnn_cls, 'rcnn_reg': rcnn_reg}
if self.training:
ret_dict.update(target_dict)
return ret_dict
class DenseFeatRefineRCNN(nn.Module):
def __init__(self, num_classes, input_channels=0, use_xyz=True):
super().__init__()
if cfg.RCNN.USE_RPN_FEATURES:
self.rcnn_input_channel = 3 + int(cfg.RCNN.USE_INTENSITY) + int(cfg.RCNN.USE_MASK) + int(cfg.RCNN.USE_DEPTH)
self.xyz_up_layer = pt_utils.SharedMLP([self.rcnn_input_channel] + cfg.RCNN.XYZ_UP_LAYER,
bn=cfg.RCNN.USE_BN)
c_out = cfg.RCNN.XYZ_UP_LAYER[-1]
self.merge_down_layer = pt_utils.SharedMLP([c_out * 2, c_out], bn=cfg.RCNN.USE_BN)
opt = DenseOpts()
opt.in_channels = input_channels
self.backbone = DenseDeepGCN(opt)
# channel_in = input_channels
opt = DenseOpts()
opt.head = False
opt.in_channels = self.backbone.channel_out + 7 * cfg.RCNN.REF_CONFIG.USE_PROPOSALS + 128 * cfg.RCNN.REF_CONFIG.USE_RPN_FEATS
# opt.constant_dilation = True
opt.n_blocks = cfg.RCNN.REF_CONFIG.N_BLOCKS
opt.kernel_size = cfg.RCNN.REF_CONFIG.KERNEL_SIZE
opt.n_filters = cfg.RCNN.REF_CONFIG.N_FILTERS
opt.conv = cfg.RCNN.REF_CONFIG.CONV
opt.constant_dilation = cfg.RCNN.REF_CONFIG.CONSTANT_DILATION
opt.linear_dilation = cfg.RCNN.REF_CONFIG.LINEAR_DILATION
self.refine = DenseDeepGCN(opt)
# classification layer
cls_channel = 1 if num_classes == 2 else num_classes
cls_layers = []
channel_in = self.refine.channel_out
pre_channel = channel_in
for k in range(0, cfg.RCNN.CLS_FC.__len__()):
cls_layers.append(pt_utils.Conv1d(pre_channel, cfg.RCNN.CLS_FC[k], bn=cfg.RCNN.USE_BN))
pre_channel = cfg.RCNN.CLS_FC[k]
cls_layers.append(pt_utils.Conv1d(pre_channel, cls_channel, activation=None))
if cfg.RCNN.DP_RATIO >= 0:
cls_layers.insert(1, nn.Dropout(cfg.RCNN.DP_RATIO))
self.cls_layer = nn.Sequential(*cls_layers)
if cfg.RCNN.LOSS_CLS == 'SigmoidFocalLoss':
self.cls_loss_func = loss_utils.SigmoidFocalClassificationLoss(alpha=cfg.RCNN.FOCAL_ALPHA[0],
gamma=cfg.RCNN.FOCAL_GAMMA)
elif cfg.RCNN.LOSS_CLS == 'BinaryCrossEntropy':
self.cls_loss_func = F.binary_cross_entropy
elif cfg.RCNN.LOSS_CLS == 'CrossEntropy':
cls_weight = torch.from_numpy(cfg.RCNN.CLS_WEIGHT).float()
self.cls_loss_func = nn.CrossEntropyLoss(ignore_index=-1, reduce=False, weight=cls_weight)
else:
raise NotImplementedError
# regression layer
per_loc_bin_num = int(cfg.RCNN.LOC_SCOPE / cfg.RCNN.LOC_BIN_SIZE) * 2
loc_y_bin_num = int(cfg.RCNN.LOC_Y_SCOPE / cfg.RCNN.LOC_Y_BIN_SIZE) * 2
reg_channel = per_loc_bin_num * 4 + cfg.RCNN.NUM_HEAD_BIN * 2 + 3
reg_channel += (1 if not cfg.RCNN.LOC_Y_BY_BIN else loc_y_bin_num * 2)
reg_layers = []
pre_channel = channel_in
for k in range(0, cfg.RCNN.REG_FC.__len__()):
reg_layers.append(pt_utils.Conv1d(pre_channel, cfg.RCNN.REG_FC[k], bn=cfg.RCNN.USE_BN))
pre_channel = cfg.RCNN.REG_FC[k]
reg_layers.append(pt_utils.Conv1d(pre_channel, reg_channel, activation=None))
if cfg.RCNN.DP_RATIO >= 0:
reg_layers.insert(1, nn.Dropout(cfg.RCNN.DP_RATIO))
self.reg_layer = nn.Sequential(*reg_layers)
self.proposal_target_layer = ProposalTargetLayer()
self.init_weights(weight_init='xavier')
def init_weights(self, weight_init='xavier'):
if weight_init == 'kaiming':
init_func = nn.init.kaiming_normal_
elif weight_init == 'xavier':
init_func = nn.init.xavier_normal_
elif weight_init == 'normal':
init_func = nn.init.normal_
else:
raise NotImplementedError
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
if weight_init == 'normal':
init_func(m.weight, mean=0, std=0.001)
else:
init_func(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.reg_layer[-1].conv.weight, mean=0, std=0.001)
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
def forward(self, input_data):
"""
:param input_data: input dict
:return:
"""
if cfg.RCNN.ROI_SAMPLE_JIT:
if self.training:
with torch.no_grad():
target_dict = self.proposal_target_layer(input_data)
pts_input = torch.cat((target_dict['sampled_pts'], target_dict['pts_feature']), dim=2)
target_dict['pts_input'] = pts_input
else:
rpn_xyz, rpn_features = input_data['rpn_xyz'], input_data['rpn_features']
batch_rois = input_data['roi_boxes3d']
if cfg.RCNN.USE_INTENSITY:
pts_extra_input_list = [input_data['rpn_intensity'].unsqueeze(dim=2),
input_data['seg_mask'].unsqueeze(dim=2)]
else:
pts_extra_input_list = [input_data['seg_mask'].unsqueeze(dim=2)]
if cfg.RCNN.USE_DEPTH:
pts_depth = input_data['pts_depth'] / 70.0 - 0.5
pts_extra_input_list.append(pts_depth.unsqueeze(dim=2))
pts_extra_input = torch.cat(pts_extra_input_list, dim=2)
pts_feature = torch.cat((pts_extra_input, rpn_features), dim=2)
pooled_features, pooled_empty_flag = \
roipool3d_utils.roipool3d_gpu(rpn_xyz, pts_feature, batch_rois, cfg.RCNN.POOL_EXTRA_WIDTH,
sampled_pt_num=cfg.RCNN.NUM_POINTS)
# canonical transformation
batch_size = batch_rois.shape[0]
roi_center = batch_rois[:, :, 0:3]
pooled_features[:, :, :, 0:3] -= roi_center.unsqueeze(dim=2)
for k in range(batch_size):
pooled_features[k, :, :, 0:3] = kitti_utils.rotate_pc_along_y_torch(pooled_features[k, :, :, 0:3],
batch_rois[k, :, 6])
pts_input = pooled_features.view(-1, pooled_features.shape[2], pooled_features.shape[3])
else:
pts_input = input_data['pts_input'].view(-1,512,133)
target_dict = {}
target_dict['pts_input'] = input_data['pts_input'].view(-1,512,133)
target_dict['roi_boxes3d'] = input_data['roi_boxes3d'].view(-1,7)
if self.training:
target_dict['cls_label'] = input_data['cls_label'].view(-1)
target_dict['reg_valid_mask'] = input_data['reg_valid_mask'].view(-1)
target_dict['gt_of_rois'] = input_data['gt_boxes3d_ct'].view(-1,7)
xyz, features = self._break_up_pc(pts_input)
if cfg.RCNN.USE_RPN_FEATURES:
xyz_input = pts_input[..., 0:self.rcnn_input_channel].transpose(1, 2).unsqueeze(dim=3)
xyz_feature = self.xyz_up_layer(xyz_input)
rpn_feature = pts_input[..., self.rcnn_input_channel:].transpose(1, 2).unsqueeze(dim=3)
merged_feature = torch.cat((xyz_feature, rpn_feature), dim=1)
merged_feature = self.merge_down_layer(merged_feature)
l_xyz, l_features = [xyz], [merged_feature.squeeze(dim=3)]
else:
l_xyz, l_features = [xyz], [features]
pt_features = self.backbone(l_features[-1].unsqueeze(3))
features = torch.max(pt_features, dim=2)[0]
if self.training:
num_proposals = cfg.RCNN.ROI_PER_IMAGE
else:
num_proposals = cfg.TEST.RPN_POST_NMS_TOP_N
if cfg.BATCH_SIZE == 1:
ref_features_prep = features.view(1,-1,features.shape[1],1).contiguous().transpose(1,2).contiguous()
else:
ref_features_prep = features.view(-1,num_proposals,features.shape[1],1).contiguous().transpose(1,2).contiguous()
#ref_features_prep = features.view(cfg.BATCH_SIZE,num_proposals,features.shape[1],1).contiguous().transpose(1,2).contiguous()
if cfg.RCNN.REF_CONFIG.USE_PROPOSALS:
proposals = input_data['roi_boxes3d'].view(-1,7)
# print(target_dict['gt_of_rois'].shape)
prop_feat = torch.zeros_like(proposals)
prop_feat[:,0] = proposals[:,0]/80 + 0.5
prop_feat[:,1] = proposals[:,1]/10 + 0.5
prop_feat[:,2] = proposals[:,2]/70
prop_feat[:,3] = proposals[:,3]/5
prop_feat[:,4] = proposals[:,4]/10
prop_feat[:,5] = proposals[:,5]/5
prop_feat[:,6] = proposals[:,6]/(2*np.pi) + 0.5
# print(ref_features_prep.shape, prop_feat.shape)
ref_features_prep = torch.cat((ref_features_prep, prop_feat.transpose(0,1).contiguous().unsqueeze(0).unsqueeze(-1)), dim=1)
if cfg.RCNN.REF_CONFIG.USE_RPN_FEATS:
rpn_feats = pts_input[..., self.rcnn_input_channel:].transpose(1, 2).unsqueeze(dim=3)
# print(rpn_feats.shape)
rpn_feats = torch.max(rpn_feats, dim=2)[0].transpose(0,1).contiguous().unsqueeze(0)
# print(features.shape, rpn_feats.shape)
ref_features_prep = torch.cat((ref_features_prep, rpn_feats), dim=1)
# print(features.shape)
# print(xyz.shape)
ref_features = self.refine(ref_features_prep).transpose(1,2).contiguous().view(-1,self.refine.channel_out,1).contiguous()
rcnn_cls = self.cls_layer(ref_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2)
rcnn_reg = self.reg_layer(ref_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C)
ret_dict = {'rcnn_cls': rcnn_cls, 'rcnn_reg': rcnn_reg}
if self.training:
ret_dict.update(target_dict)
return ret_dict
| 73a61b6fa78c485f1e1604a9e94300405f39746a | 6,283 | py | Python | GTSRB/gtsrb_dataset.py | THUYimingLi/Open-sourced_Dataset_Protection | 910962c57e7d132497443b26c8e5da1dcb5ba4eb | ["Apache-2.0"] | 14 | 2020-11-16T03:57:19.000Z | 2022-03-30T01:44:53.000Z | GTSRB/gtsrb_dataset.py | THUYimingLi/Open-sourced_Dataset_Protection | 910962c57e7d132497443b26c8e5da1dcb5ba4eb | ["Apache-2.0"] | null | null | null | GTSRB/gtsrb_dataset.py | THUYimingLi/Open-sourced_Dataset_Protection | 910962c57e7d132497443b26c8e5da1dcb5ba4eb | ["Apache-2.0"] | 5 | 2020-11-16T03:56:00.000Z | 2022-03-19T06:37:02.000Z |
import torch
import os
import pandas as pd
from torch.utils.data import Dataset
import numpy as np
from PIL import Image
import random
class GTSRB(Dataset):
base_folder = 'GTSRB'
def __init__(self, root_dir, train=False, transform=None, y_target=None):
"""
Args:
train (bool): Load trainingset or test set.
root_dir (string): Directory containing GTSRB folder.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.root_dir = root_dir
self.sub_directory = 'trainingset' if train else 'testset'
self.csv_file_name = 'training.csv' if train else 'test.csv'
csv_file_path = os.path.join(
root_dir, self.base_folder, self.sub_directory, self.csv_file_name)
self.csv_data = pd.read_csv(csv_file_path)
self.transform = transform
if y_target is not None:
self.csv_data.iloc[:, 1] = y_target
def __len__(self):
return len(self.csv_data)
def __getitem__(self, idx):
img_path = os.path.join(self.root_dir, self.base_folder, self.sub_directory,
self.csv_data.iloc[idx, 0])
img = Image.open(img_path)
classId = self.csv_data.iloc[idx, 1]
if self.transform is not None:
img = self.transform(img)
return img, classId
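# Hypothetical usage (paths and transform are placeholders, not from the
# original file):
#   from torchvision import transforms
#   from torch.utils.data import DataLoader
#   dataset = GTSRB(root_dir='/data', train=True, transform=transforms.ToTensor())
#   loader = DataLoader(dataset, batch_size=64, shuffle=True)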
class GTSRB_subset(Dataset):
base_folder = 'GTSRB'
def __init__(self, root_dir, train=False, transform=None, List=(), y_target=None):
"""
Args:
train (bool): Load trainingset or test set.
root_dir (string): Directory containing GTSRB folder.
transform (callable, optional): Optional transform to be applied
on a sample.
List: indices of the selected samples
"""
assert len(List) > 0, "Dataset should contain at least one sample"
self.root_dir = root_dir
self.sub_directory = 'trainingset' if train else 'testset'
self.csv_file_name = 'training.csv' if train else 'test.csv'
csv_file_path = os.path.join(
root_dir, self.base_folder, self.sub_directory, self.csv_file_name)
self.csv_data = pd.read_csv(csv_file_path).iloc[List]
self.transform = transform
if y_target is not None:
self.csv_data.iloc[:, 1] = y_target
def __len__(self):
return len(self.csv_data)
def __getitem__(self, idx):
img_path = os.path.join(self.root_dir, self.base_folder, self.sub_directory,
self.csv_data.iloc[idx, 0])
img = Image.open(img_path)
classId = self.csv_data.iloc[idx, 1]
if self.transform is not None:
img = self.transform(img)
return img, classId
class GTSRB_subclass(Dataset):
base_folder = 'GTSRB'
def __init__(self, root_dir, train=False, transform=None, Class=2, y_target=None):
"""
Args:
train (bool): Load trainingset or test set.
root_dir (string): Directory containing GTSRB folder.
transform (callable, optional): Optional transform to be applied
on a sample.
Class: the selected class
"""
self.root_dir = root_dir
self.sub_directory = 'trainingset' if train else 'testset'
self.csv_file_name = 'training.csv' if train else 'test.csv'
self.Class = Class
csv_file_path = os.path.join(
root_dir, self.base_folder, self.sub_directory, self.csv_file_name)
All_data = pd.read_csv(csv_file_path)
List = [i for i in range(len(All_data)) if All_data.iloc[i, 1] == self.Class]
assert len(List) > 0, "Dataset should contain at least one sample of the selected class"
self.csv_data = All_data.iloc[List]
self.transform = transform
if y_target is not None:
self.csv_data.iloc[:, 1] = y_target
def __len__(self):
return len(self.csv_data)
def __getitem__(self, idx):
img_path = os.path.join(self.root_dir, self.base_folder, self.sub_directory,
self.csv_data.iloc[idx, 0])
img = Image.open(img_path)
classId = self.csv_data.iloc[idx, 1]
if self.transform is not None:
img = self.transform(img)
return img, classId
class GTSRB_Testset(Dataset):
base_folder = 'GTSRB'
def __init__(self, root_dir, train=False, transform=None, select_class=2, num_img=100):
"""
Args:
train (bool): Load trainingset or test set.
root_dir (string): Directory containing GTSRB folder.
transform (callable, optional): Optional transform to be applied
on a sample.
select_class: the class of the selected samples
num_img: number of selected images
"""
self.root_dir = root_dir
self.sub_directory = 'trainingset' if train else 'testset'
self.csv_file_name = 'training.csv' if train else 'test.csv'
csv_file_path = os.path.join(
root_dir, self.base_folder, self.sub_directory, self.csv_file_name)
self.csv_data = pd.read_csv(csv_file_path)
# keep only the rows of the selected class (DataFrame.append was deprecated and later removed from pandas)
self.csv_data_new = self.csv_data[self.csv_data.iloc[:, 1] == select_class]
# randomly select num_img row indices
random.seed(random.randint(1, 10000))  # note: seeding from random.randint adds no real randomness
idx = list(np.arange(len(self.csv_data_new)))
random.shuffle(idx)
image_idx = idx[:num_img]
self.csv_data_final = self.csv_data_new.iloc[image_idx] # final data
self.transform = transform
def __len__(self):
return len(self.csv_data_final)
def __getitem__(self, idx):
img_path = os.path.join(self.root_dir, self.base_folder, self.sub_directory,
self.csv_data_final.iloc[idx, 0])
img = Image.open(img_path)
classId = self.csv_data_final.iloc[idx, 1]
if self.transform is not None:
img = self.transform(img)
return img, classId
| 73fbda3e611949920121e48e8f46385237314c91 | 13 | py | Python | hacktw/add.py | nane121/HacktoberFest2020 | 29eb99754ee93f643d4b0bd7e18570079e718d59 | ["MIT"] | 25 | 2020-10-01T05:44:04.000Z | 2020-10-30T17:30:26.000Z | hacktw/add.py | nane121/HacktoberFest2020 | 29eb99754ee93f643d4b0bd7e18570079e718d59 | ["MIT"] | 14 | 2020-10-01T09:32:47.000Z | 2020-11-05T16:17:12.000Z | hacktw/add.py | nane121/HacktoberFest2020 | 29eb99754ee93f643d4b0bd7e18570079e718d59 | ["MIT"] | 143 | 2020-10-01T05:47:04.000Z | 2021-10-03T04:25:42.000Z |
print(20+20)
| fb6fe7eb7bf8187e3cf9497cb8d790449df29a31 | 589 | py | Python | Codewars/7kyu/slamming-lockers/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | ["MIT"] | 7 | 2017-09-20T16:40:39.000Z | 2021-08-31T18:15:08.000Z | Codewars/7kyu/slamming-lockers/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | ["MIT"] | null | null | null | Codewars/7kyu/slamming-lockers/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | ["MIT"] | null | null | null |
# Python - 2.7.6
test.assert_equals(locker_run(1), [1])
test.assert_equals(locker_run(2), [1])
test.assert_equals(locker_run(3), [1])
test.assert_equals(locker_run(5), [1, 4])
test.assert_equals(locker_run(8), [1, 4])
test.assert_equals(locker_run(10), [1, 4, 9])
test.assert_equals(locker_run(20), [1, 4, 9, 16])
test.assert_equals(locker_run(50), [1, 4, 9, 16, 25, 36, 49])
test.assert_equals(locker_run(100), [1, 4, 9, 16, 25, 36, 49, 64, 81, 100])
test.assert_equals(locker_run(500), [1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225, 256, 289, 324, 361, 400, 441, 484])
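# A reference solution consistent with the expectations above (a sketch, not
# part of the original file): locker k ends open iff it is toggled an odd
# number of times, i.e. iff k has an odd number of divisors, i.e. iff k is a
# perfect square.
# def locker_run(n):
#     return [i * i for i in range(1, int(n ** 0.5) + 1)]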
| 83c557df53053c2d8cc900328e7ecffd5c63900d | 1,622 | py | Python | Bruteforce-main/Brute.py | Zusyaku/Termux-And-Lali-Linux-V2 | b1a1b0841d22d4bf2cc7932b72716d55f070871e | ["Apache-2.0"] | 2 | 2021-11-17T03:35:03.000Z | 2021-12-08T06:00:31.000Z | Bruteforce-main/Brute.py | Zusyaku/Termux-And-Lali-Linux-V2 | b1a1b0841d22d4bf2cc7932b72716d55f070871e | ["Apache-2.0"] | null | null | null | Bruteforce-main/Brute.py | Zusyaku/Termux-And-Lali-Linux-V2 | b1a1b0841d22d4bf2cc7932b72716d55f070871e | ["Apache-2.0"] | 2 | 2021-11-05T18:07:48.000Z | 2022-02-24T21:25:07.000Z |
# quit asking for recodes already
# encrypted by MR.1557
# thanks for the 50 subscribers
import marshal,zlib,base64
exec(marshal.loads(zlib.decompress(base64.b32decode("PCOHKVC5OPNUIFB5WJRSOTXCPS5U4GYKFLSSGLLAEXICIQ6KGBIAFBAHGKO7AAFIME6IU52TZOYSYR53TKDJTPHFE7YAQ3546MR7QKP5ENYPO2VFFAQ2YRTXZ667PXFTXNLR6POYGFE7OF3UVPEQUIAAT6WAHX4BVDAK6QCU4FLSCJXAJ5ANJYGXEDVPB2YQSPYCOING4BHUCNBAUYYWZQDP4R4BV7TURZT6BT2AZZQDACZRQNFQPDVE7JM3IZIVNXIUJ663PUJZRS6BPQHBM4VQNCAFQAUZMUZFIZ6KFDJRMK2WUZC3GQ7TDWXCSEO2EVBILZSVTJVTJX5V7Y65MZWZE3ZEFE2YWPCDPLD776DP7JQVPYVVXGOO4XHWCX5Y6DTSCFZADZALZDOLBIDIXOFEWYXOD7CPZAS6Z7PSHP6GLSDFBRLDRUMCNRQHAB3SD3RCDQRK4QOMIO6YSYWYQ55RFXPIG6A73OLY5Y5XEHNRRLGCL6CVRB24YVUR3SLLQ5LBPD7MNEP5WNT4ICTSEAJ22Q2AM5ZBH3SL5HYJI7JOH7OISI5TJI2MU36I75GEPZMPP6EFECZ7MYXT5GTZXUIV724IJN2LPCVYJH4U6BDAT32EVKM6DDNNNDW4KJZ3J5EXTARDDNI7QD4NANOSHNZXSRA2EDQJOVXRS3TBNYO24HP7SNKFLETV777IXZMPRKJBEMWFC5M4R2TYI7OVZUGL4VF2C4RNZNZLSMK554RN7DMXZN5ELVU2N33VDIOVFVBCKCWVL2LXLD5OSACZ462TSY36S6TW24L5ML2TRLO6N67XDBORRVBNJ2N4LXBC23S4LCROCT7JLCUVZUU5476RNXRWN5V2N46G27SXVJQ3JP2MOU7U4KHESC32WVRYGYKKD6ZM5D2XKGBE5ZQXQEUEDH46OOGV5GYXIM7P7PB45ZWW22HKX2DXA7H6KF7O3HDFOT7WKXO2JP4OQJYMRMU6G2GO75RYPYIHP5U7M423XLT2JBJV725R3L23ZE2EZLHETUETY5YXZWXJYWITPDC7Q2R45OB7KZJ4IGYHJJ27JOLGZVCJXV25AQ5OA5JFF43GCWO2FSSNFYZI2SOGKHOVGL5ZGSJQCIASW6Z5PE5I5Q4R4YX4O7P5Y5XWVXUA46SPAKSOQT5U3HEOASX6NBWG6KTN3H6U6I25MQNMGPTPUPMKGM6WFW3H3NRZXMR5MYN3GHNTHVR2XNBZL4RM6UVJWTJONQ2SX4VO4WTMB5B25DFU5F5RFPHKQ2RXDKQPJUIZHUZEHN5TZFVDRQRQGMIMI4ZNHEHHTQ2QC3UH2JETRVTZJPGSDMWO2FNAZTAD2HTF7U4LI6NBIOJIZ5B323W6TMV4H7IQF5M6TMZIC3UVA7TOIJVI6TG5XFYVKZQ5OM2WXC2OYNPDLOQ6HCNU5LN2444MI2KVTLCHQTEZLHT35M2Q7EL7IN32YU======"))))
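# note: the line above base32-decodes, zlib-decompresses, and exec()s an opaque
# marshalled payload at import time; its behavior cannot be verified from this
# file alone, so it should be treated as untrusted code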
| 83d967b858f881f7d518ad7ff89272000e3f63de | 5,925 | py | Python | tests/end_to_end_tests/tests/test_cpp11_embed_binary_data.py | jcn509/cpp_embed | e8763fe8c00e094cffb1c06a3a1206b79e388987 | ["MIT"] | null | null | null | tests/end_to_end_tests/tests/test_cpp11_embed_binary_data.py | jcn509/cpp_embed | e8763fe8c00e094cffb1c06a3a1206b79e388987 | ["MIT"] | null | null | null | tests/end_to_end_tests/tests/test_cpp11_embed_binary_data.py | jcn509/cpp_embed | e8763fe8c00e094cffb1c06a3a1206b79e388987 | ["MIT"] | null | null | null |
"""Tests to ensure that a valid header file can be generated from binary data"""
from pathlib import Path
import pytest
from .utilities import OUTPUT_TO_FILE_FLAGS, run_cpp11_embed, TEST_FILES_DIR
def _get_expected_binary_data_header(
identifier_name: str, use_header_guard: bool, expected_data: str, expected_size: int
) -> str:
if use_header_guard:
header_guard = identifier_name.upper()
return f"#ifndef {header_guard}\n#define {header_guard}\n\n#include <array>\n#include <cstdint>\n\nconstexpr std::array<uint8_t, {expected_size}> {identifier_name}{expected_data};\n\n#endif\n"
return f"#pragma once\n\n#include <array>\n#include <cstdint>\n\nconstexpr std::array<uint8_t, {expected_size}> {identifier_name}{expected_data};\n"
@pytest.mark.parametrize("identifier_name", ("test_name", "other_name"))
@pytest.mark.parametrize("use_header_guard", (True, False))
@pytest.mark.parametrize("binary_mode_flag", ("--binary-mode", "-b"))
def test_successful_binary_file_input_output_to_stdout(
identifier_name: str, use_header_guard: bool, binary_mode_flag: str
):
"""Test that a binary file can be read successfully and the correct header
is generated and printed to standard output.
Also makes sure that the return code is 0 and nothing is written to
standard error.
"""
result = run_cpp11_embed(
TEST_FILES_DIR / "one_line.txt",
identifier_name,
use_header_guard,
other_arguments=(binary_mode_flag,),
)
expected_data = "{97, 98, 99, 100, 101, 102}"
assert result.stdout == _get_expected_binary_data_header(
identifier_name, use_header_guard, expected_data, 6
)
assert result.stderr == "", "No errors reported"
assert result.returncode == 0, "No errors reported"
@pytest.mark.parametrize("identifier_name", ("test_name", "other_name"))
@pytest.mark.parametrize("use_header_guard", (True, False))
@pytest.mark.parametrize("binary_mode_flag", ("--binary-mode", "-b"))
@pytest.mark.parametrize("output_to_file_flag", OUTPUT_TO_FILE_FLAGS)
@pytest.mark.parametrize("output_filename", ("out.txt", "other.h"))
def test_successful_binary_file_input_output_to_file(
identifier_name: str,
use_header_guard: bool,
binary_mode_flag: str,
output_to_file_flag: str,
output_filename: str,
tmp_path: Path,
):
"""Test that a binary file can be read successfully and the correct header
is generated and written to a file.
Also makes sure that the return code is 0 and nothing is written to
standard output or standard error.
"""
output_file_path = tmp_path / output_filename
result = run_cpp11_embed(
TEST_FILES_DIR / "one_line.txt",
identifier_name,
use_header_guard,
other_arguments=(binary_mode_flag, output_to_file_flag, output_file_path),
)
expected_data = "{97, 98, 99, 100, 101, 102}"
assert result.stdout == "", "Nothing written to standard output"
assert output_file_path.read_text() == _get_expected_binary_data_header(
identifier_name, use_header_guard, expected_data, 6
), "Correct header written to file"
assert result.stderr == "", "No errors reported"
assert result.returncode == 0, "No errors reported"
@pytest.mark.parametrize("identifier_name", ("test_name", "other_name"))
@pytest.mark.parametrize("use_header_guard", (True, False))
@pytest.mark.parametrize("binary_mode_flag", ("--binary-mode", "-b"))
def test_successful_binary_stdin_output_to_stdout(
identifier_name: str, use_header_guard: bool, binary_mode_flag: str
):
"""Test that binary data can be read successfully from standard input and
the correct header is generated and printed to standard output.
Also makes sure that the return code is 0 and nothing is written to
standard error.
"""
file_contents = Path(TEST_FILES_DIR / "one_line.txt").read_text()
result = run_cpp11_embed(
"-",
identifier_name,
use_header_guard,
other_arguments=(binary_mode_flag,),
standard_input=file_contents,
)
expected_data = "{97, 98, 99, 100, 101, 102}"
assert result.stdout == _get_expected_binary_data_header(
identifier_name, use_header_guard, expected_data, 6
)
assert result.stderr == "", "No errors reported"
assert result.returncode == 0, "No errors reported"
@pytest.mark.parametrize("identifier_name", ("test_name", "other_name"))
@pytest.mark.parametrize("use_header_guard", (True, False))
@pytest.mark.parametrize("binary_mode_flag", ("--binary-mode", "-b"))
@pytest.mark.parametrize("output_to_file_flag", OUTPUT_TO_FILE_FLAGS)
@pytest.mark.parametrize("output_filename", ("out.txt", "other.h"))
def test_successful_binary_stdin_output_to_file(
identifier_name: str,
use_header_guard: bool,
binary_mode_flag: str,
output_to_file_flag: str,
output_filename: str,
tmp_path: Path,
):
"""Test that binary data can be read successfully from standard input and
the correct header is generated and written to a file.
Also makes sure that the return code is 0 and nothing is written to
standard output or standard error.
"""
file_contents = Path(TEST_FILES_DIR / "one_line.txt").read_text()
output_file_path = tmp_path / output_filename
result = run_cpp11_embed(
"-",
identifier_name,
use_header_guard,
other_arguments=(binary_mode_flag, output_to_file_flag, output_file_path),
standard_input=file_contents,
)
expected_data = "{97, 98, 99, 100, 101, 102}"
assert result.stdout == "", "Nothing written to standard output"
assert output_file_path.read_text() == _get_expected_binary_data_header(
identifier_name, use_header_guard, expected_data, 6
), "Correct header written to file"
assert result.stderr == "", "No errors reported"
assert result.returncode == 0, "No errors reported"
| 41.433566
| 200
| 0.72
| 822
| 5,925
| 4.891727
| 0.131387
| 0.057448
| 0.062671
| 0.04576
| 0.930863
| 0.930863
| 0.924646
| 0.90674
| 0.902263
| 0.902263
| 0
| 0.017125
| 0.172152
| 5,925
| 142
| 201
| 41.725352
| 0.80265
| 0.158312
| 0
| 0.807692
| 1
| 0.019231
| 0.239117
| 0.035357
| 0
| 0
| 0
| 0
| 0.134615
| 1
| 0.048077
| false
| 0
| 0.028846
| 0
| 0.096154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7917af00d905c729c3eaebc229512461028331a7
| 14,258
|
py
|
Python
|
src/svtk/svtk/cli/pesr_test.py
|
leipzig/gatk-sv
|
96566cbbaf0f8f9c8452517b38eea1e5dd6ed33a
|
[
"BSD-3-Clause"
] | 76
|
2020-06-18T21:31:43.000Z
|
2022-03-02T18:42:58.000Z
|
src/svtk/svtk/cli/pesr_test.py
|
leipzig/gatk-sv
|
96566cbbaf0f8f9c8452517b38eea1e5dd6ed33a
|
[
"BSD-3-Clause"
] | 195
|
2020-06-22T15:12:28.000Z
|
2022-03-28T18:06:46.000Z
|
src/svtk/svtk/cli/pesr_test.py
|
leipzig/gatk-sv
|
96566cbbaf0f8f9c8452517b38eea1e5dd6ed33a
|
[
"BSD-3-Clause"
] | 39
|
2020-07-03T06:47:18.000Z
|
2022-03-03T03:47:25.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2017 Matthew Stone <mstone5@mgh.harvard.edu>
# Distributed under terms of the MIT license.
"""
Calculate enrichment of clipped reads or discordant pairs at SV breakpoints.
"""
import argparse
import sys
import pysam
import pandas as pd
from svtk.pesr import SRTestRunner, PETestRunner, PETest, SRTest
def sr_test(argv):
parser = argparse.ArgumentParser(
description="Calculate enrichment of clipped reads at SV breakpoints.",
prog='svtk sr-test',
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('vcf',
help='VCF of variant calls. Standardized to include '
'CHR2, END, SVTYPE, STRANDS in INFO.')
parser.add_argument('countfile', help='Tabix indexed file of split counts.'
' Columns: chrom,pos,clip,count,sample')
parser.add_argument('fout',
                        help='Output table of most significant start/end '
                             'positions.')
parser.add_argument('-w', '--window', type=int, default=100,
help='Window around variant start/end to consider for '
'split read support. [100]')
parser.add_argument('--common', default=False,
action='store_true', help='Ignore background for common AF')
parser.add_argument('-b', '--background', type=int, default=160,
help='Number of background samples to choose for '
'comparison in t-test. [160]')
parser.add_argument('-s', '--samples', type=argparse.FileType('r'),
default=None,
help='Whitelist of samples to restrict testing to.')
parser.add_argument('--index', default=None,
help='Tabix index of discordant pair file. Required if '
'discordant pair file is hosted remotely.')
# TODO: add normalization
parser.add_argument('--medianfile', default=None,
help='Median coverage statistics for each library '
'(optional). If provided, each sample\'s split '
'counts will be normalized accordingly. '
'Same format as RdTest, one column per sample.')
parser.add_argument('--log', action='store_true', default=False,
help='Print progress log to stderr.')
# Print help if no arguments specified
if len(argv) == 0:
parser.print_help()
sys.exit(1)
args = parser.parse_args(argv)
vcf = pysam.VariantFile(args.vcf)
if args.index is not None:
countfile = pysam.TabixFile(args.countfile, index=args.index,
parser=pysam.asTuple())
else:
if args.countfile.startswith('http'):
raise Exception('Must provide tabix index with remote URL')
countfile = pysam.TabixFile(args.countfile, parser=pysam.asTuple())
if args.fout in '- stdout'.split():
fout = sys.stdout
else:
fout = open(args.fout, 'w')
header = 'name coord pos log_pval called_median bg_median bg_frac'.split()
fout.write('\t'.join(header) + '\n')
if args.samples is not None:
whitelist = [s.strip() for s in args.samples.readlines()]
else:
whitelist = None
if args.medianfile is not None:
medians = pd.read_table(args.medianfile)
medians = pd.melt(medians, var_name='sample', value_name='median_cov')
else:
medians = None
runner = SRTestRunner(vcf, countfile, fout, args.background, args.common,
args.window, whitelist, medians=medians, log=args.log)
runner.run()
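# Example invocation of the sr-test subcommand defined above (file names are
# hypothetical, shown only to illustrate the interface):
#   svtk sr-test calls.vcf split_counts.txt.gz sr_results.txt -w 100 -b 160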
def pe_test(argv):
parser = argparse.ArgumentParser(
description="Calculate enrichment of discordant pairs at SV breakpoints.",
prog='svtk pe-test',
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('vcf', help='Variants.')
parser.add_argument('disc', help='Table of discordant pair coordinates.')
parser.add_argument('fout', type=argparse.FileType('w'),
help='Output table of PE counts.')
parser.add_argument('-o', '--window-out', type=int, default=500,
help='Window outside breakpoint to query for '
'discordant pairs. [500]')
parser.add_argument('-i', '--window-in', type=int, default=50,
help='Window inside breakpoint to query for '
'discordant pairs. [50]')
parser.add_argument('-b', '--background', type=int, default=160,
help='Number of background samples to sample for PE '
'evidence. [160]')
parser.add_argument('--common', default=False,
action='store_true', help='Ignore background for common AF')
parser.add_argument('-s', '--samples', type=argparse.FileType('r'),
default=None,
help='Whitelist of samples to restrict testing to.')
parser.add_argument('--index', default=None,
help='Tabix index of discordant pair file. Required if '
'discordant pair file is hosted remotely.')
parser.add_argument('--medianfile', default=None,
help='Median coverage statistics for each library '
'(optional). If provided, each sample\'s split '
'counts will be normalized accordingly. '
'Same format as RdTest, one column per sample.')
parser.add_argument('--log', action='store_true', default=False,
help='Print progress log to stderr.')
if len(argv) == 0:
parser.print_help()
sys.exit(1)
args = parser.parse_args(argv)
if args.vcf in '- stdin'.split():
vcf = pysam.VariantFile(sys.stdin)
else:
vcf = pysam.VariantFile(args.vcf)
if args.fout in '- stdout'.split():
fout = sys.stdout
else:
fout = args.fout
header = 'name log_pval called_median bg_median bg_frac'.split()
args.fout.write('\t'.join(header) + '\n')
if args.samples is not None:
whitelist = [s.strip() for s in args.samples.readlines()]
else:
whitelist = None
if args.index is not None:
discfile = pysam.TabixFile(args.disc, index=args.index)
else:
if args.disc.startswith('http'):
raise Exception('Must provide tabix index with remote URL')
discfile = pysam.TabixFile(args.disc)
if args.medianfile is not None:
medians = pd.read_table(args.medianfile)
medians = pd.melt(medians, var_name='sample', value_name='median_cov')
else:
medians = None
runner = PETestRunner(vcf, discfile, fout, args.background, args.common,
args.window_in, args.window_out, whitelist, medians=medians, log=args.log)
runner.run()
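# Example invocation of the pe-test subcommand defined above (hypothetical
# file names):
#   svtk pe-test calls.vcf discordant_pairs.txt.gz pe_results.txt -o 500 -i 50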
def count_pe(argv):
parser = argparse.ArgumentParser(
description="Count discordant pairs supporting a SV breakpoints.",
prog='svtk count-pe',
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('vcf', help='Variants.')
parser.add_argument('disc', help='Table of discordant pair coordinates.')
parser.add_argument('fout', type=argparse.FileType('w'),
help='Output table of PE counts.')
parser.add_argument('-o', '--window-out', type=int, default=500,
help='Window outside breakpoint to query for '
'discordant pairs. [500]')
parser.add_argument('-i', '--window-in', type=int, default=50,
help='Window inside breakpoint to query for '
'discordant pairs. [50]')
parser.add_argument('--common', default=False,
action='store_true', help='Ignore background for common AF')
parser.add_argument('-s', '--samples', type=argparse.FileType('r'),
default=None,
help='Whitelist of samples to restrict testing to.')
parser.add_argument('--index', default=None,
help='Tabix index of discordant pair file. Required if '
'discordant pair file is hosted remotely.')
parser.add_argument('--medianfile', default=None,
help='Median coverage statistics for each library '
'(optional). If provided, each sample\'s split '
'counts will be normalized accordingly. '
'Same format as RdTest, one column per sample.')
if len(argv) == 0:
parser.print_help()
sys.exit(1)
args = parser.parse_args(argv)
if args.vcf in '- stdin'.split():
vcf = pysam.VariantFile(sys.stdin)
else:
vcf = pysam.VariantFile(args.vcf)
if args.fout in '- stdout'.split():
fout = sys.stdout
else:
fout = args.fout
header = 'name sample count'.split()
args.fout.write('\t'.join(header) + '\n')
if args.samples is not None:
whitelist = [s.strip() for s in args.samples.readlines()]
else:
whitelist = [s for s in vcf.header.samples]
if args.index is not None:
discfile = pysam.TabixFile(args.disc, index=args.index)
else:
if args.disc.startswith('http'):
raise Exception('Must provide tabix index with remote URL')
discfile = pysam.TabixFile(args.disc)
if args.medianfile is not None:
medians = pd.read_table(args.medianfile)
medians = pd.melt(medians, var_name='sample', value_name='median_cov')
else:
medians = None
petest = PETest(discfile, args.common, args.window_in,
args.window_out, medians=medians)
for record in vcf:
counts = petest.load_counts(record, args.window_in, args.window_out)
counts = petest.normalize_counts(counts)
counts = counts.set_index('sample')
counts = counts.reindex(whitelist).fillna(0).astype(int)
counts = counts.reset_index()
counts['name'] = record.id
cols = 'name sample count'.split()
        for row in counts[cols].values:  # .values replaces the removed DataFrame.as_matrix()
fout.write('\t'.join([str(x) for x in row]) + '\n')
# counts[cols].to_csv(fout, header=False, index=False, sep='\t', na_rep='NA')
def count_sr(argv):
parser = argparse.ArgumentParser(
description="Count clipped reads at SV breakpoints. Unwindowed.",
prog='svtk count-sr',
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('vcf',
help='VCF of variant calls. Standardized to include '
'CHR2, END, SVTYPE, STRANDS in INFO.')
parser.add_argument('countfile', help='Tabix indexed file of split counts.'
' Columns: chrom,pos,clip,count,sample')
parser.add_argument('fout',
help='Output table of split read counts.')
parser.add_argument('--common', default=False,
action='store_true', help='Ignore background for common AF')
parser.add_argument('-s', '--samples', type=argparse.FileType('r'),
default=None,
help='Whitelist of samples to restrict testing to.')
parser.add_argument('--index', default=None,
help='Tabix index of discordant pair file. Required if '
'discordant pair file is hosted remotely.')
# TODO: add normalization
parser.add_argument('--medianfile', default=None,
help='Median coverage statistics for each library '
'(optional). If provided, each sample\'s split '
'counts will be normalized accordingly. '
'Same format as RdTest, one column per sample.')
# Print help if no arguments specified
if len(argv) == 0:
parser.print_help()
sys.exit(1)
args = parser.parse_args(argv)
vcf = pysam.VariantFile(args.vcf)
if args.index is not None:
countfile = pysam.TabixFile(args.countfile, index=args.index,
parser=pysam.asTuple())
else:
if args.countfile.startswith('http'):
raise Exception('Must provide tabix index with remote URL')
countfile = pysam.TabixFile(args.countfile, parser=pysam.asTuple())
if args.fout in '- stdout'.split():
fout = sys.stdout
else:
fout = open(args.fout, 'w')
header = 'name coord sample count'.split()
fout.write('\t'.join(header) + '\n')
if args.samples is not None:
whitelist = [s.strip() for s in args.samples.readlines()]
else:
whitelist = [s for s in vcf.header.samples]
if args.medianfile is not None:
medians = pd.read_table(args.medianfile)
medians = pd.melt(medians, var_name='sample', value_name='median_cov')
else:
medians = None
srtest = SRTest(countfile, args.common, window=0, medians=medians)
for record in vcf:
for coord in 'start end'.split():
if coord == 'start':
pos, strand, chrom = record.pos, record.info['STRANDS'][0], record.chrom
else:
# TODO: With a properly formatted VCF, should be using END2 instead of END here
pos, strand, chrom = record.stop, record.info['STRANDS'][1], record.info['CHR2']
counts = srtest.load_counts(chrom, pos, strand)
counts = srtest.normalize_counts(counts)
counts = counts['sample count'.split()]
counts = counts.set_index('sample')
counts = counts.reindex(whitelist).fillna(0).astype(int)
counts = counts.reset_index()
counts['name'] = record.id
counts['coord'] = coord
for row in counts[header].values:
fout.write('\t'.join([str(x) for x in row]) + '\n')
# counts[header].to_csv(fout, header=False, index=False, sep='\t', na_rep='NA')
| 42.688623
| 100
| 0.591948
| 1,666
| 14,258
| 5.005402
| 0.148259
| 0.039933
| 0.075429
| 0.010073
| 0.872047
| 0.846025
| 0.824799
| 0.818084
| 0.809689
| 0.774673
| 0
| 0.006046
| 0.292397
| 14,258
| 333
| 101
| 42.816817
| 0.820398
| 0.040398
| 0
| 0.8125
| 0
| 0
| 0.258801
| 0.003952
| 0
| 0
| 0
| 0.003003
| 0
| 1
| 0.014706
| false
| 0
| 0.018382
| 0
| 0.033088
| 0.014706
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f78a315ebf727c1baa8bb1a6c42e0daa961757ff
| 1,542
|
py
|
Python
|
stix2generator/test/test_object_generator_string.py
|
majacQ/cti-stix-generator
|
7465ecd29ef6caabf9f1b60ad45dad789c475028
|
[
"BSD-3-Clause"
] | 20
|
2020-12-10T18:16:28.000Z
|
2022-02-20T19:30:53.000Z
|
stix2generator/test/test_object_generator_string.py
|
majacQ/cti-stix-generator
|
7465ecd29ef6caabf9f1b60ad45dad789c475028
|
[
"BSD-3-Clause"
] | 26
|
2021-01-13T23:32:19.000Z
|
2022-03-29T06:47:02.000Z
|
stix2generator/test/test_object_generator_string.py
|
majacQ/cti-stix-generator
|
7465ecd29ef6caabf9f1b60ad45dad789c475028
|
[
"BSD-3-Clause"
] | 8
|
2020-12-14T23:10:16.000Z
|
2021-12-06T13:07:24.000Z
|
import pytest
import stix2generator.exceptions
def test_string(object_generator, num_trials):
for _ in range(num_trials):
value = object_generator.generate_from_spec({
"type": "string",
"minLength": 1,
"maxLength": 5
})
assert 1 <= len(value) <= 5
assert isinstance(value, str)
def test_string_missing_length_bounds(object_generator):
with pytest.raises(stix2generator.exceptions.ObjectGenerationError):
object_generator.generate_from_spec({
"type": "string",
"minLength": 1
})
with pytest.raises(stix2generator.exceptions.ObjectGenerationError):
object_generator.generate_from_spec({
"type": "string",
"maxLength": 5
})
def test_string_inverted_bounds(object_generator):
with pytest.raises(stix2generator.exceptions.ObjectGenerationError):
object_generator.generate_from_spec({
"type": "string",
"minLength": 5,
"maxLength": 1
})
def test_string_negative_bounds(object_generator):
with pytest.raises(stix2generator.exceptions.ObjectGenerationError):
object_generator.generate_from_spec({
"type": "string",
"minLength": 1,
"maxLength": -5
})
with pytest.raises(stix2generator.exceptions.ObjectGenerationError):
object_generator.generate_from_spec({
"type": "string",
"minLength": -1,
"maxLength": 5
})
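# Note: object_generator and num_trials are pytest fixtures assumed to be
# provided by the package's conftest.py; the "type"/"minLength"/"maxLength"
# dicts above follow the generator's JSON-Schema-like spec format.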
| 28.036364
| 72
| 0.623217
| 141
| 1,542
| 6.560284
| 0.248227
| 0.162162
| 0.149189
| 0.175135
| 0.749189
| 0.749189
| 0.749189
| 0.749189
| 0.749189
| 0.749189
| 0
| 0.016071
| 0.273671
| 1,542
| 54
| 73
| 28.555556
| 0.809821
| 0
| 0
| 0.642857
| 1
| 0
| 0.097276
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 1
| 0.095238
| false
| 0
| 0.047619
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e3a09d55942c07fa74707a04997c4e7b9a7e6d09
| 30,717
|
py
|
Python
|
Code/plotting.py
|
MartinSchiemer/Revisiting_the_Information_Plane
|
0376d4a30d3753698f5985d657c92c3395def3ac
|
[
"MIT"
] | 1
|
2021-07-19T02:07:01.000Z
|
2021-07-19T02:07:01.000Z
|
Code/plotting.py
|
MartinSchiemer/Revisiting_the_Information_Plane
|
0376d4a30d3753698f5985d657c92c3395def3ac
|
[
"MIT"
] | null | null | null |
Code/plotting.py
|
MartinSchiemer/Revisiting_the_Information_Plane
|
0376d4a30d3753698f5985d657c92c3395def3ac
|
[
"MIT"
] | null | null | null |
"""
Author: Martin Schiemer
provides plotting functionalities
"""
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import seaborn as sns
import os
import sys
import math
import errno  # errno.EEXIST is checked in the makedirs error handling below
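# Note: the directory-creation pattern repeated before each plt.savefig call
# below (os.path.exists check + os.makedirs + errno.EEXIST guard) is
# equivalent to os.makedirs(path, exist_ok=True) on Python 3.2+; the explicit
# form is kept as written.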
def plot_bin_histo(name, layer_nr, outputs_obj, mi_obj, show_flag, save_flag, limit = False):
"""
plots histogram of the bins after binning estimation
name: name of the network
layer_nr: index of the layer which should be plotted
outputs_obj: output object which holds the activation dictionary
mi_obj: mutual information object
show_flag: flag that decides if plot should be displayed
save_flag: flag that decides if plot should be saved
limit: flag that decides if the plot range should be limited
"""
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, sharey=False)
fig.set_figheight(10)
fig.set_figwidth(15)
#fig.suptitle(("Empty bins development for layer " + str(layer_nr) + " (" + name + ", score: "+ str(mi_obj.model_score) + ")"))
fig.suptitle(("Empty bins development for layer " + str(layer_nr) + " (score: "+ str(mi_obj.model_score) + ")"), fontsize=15)
ax3.set_xlabel("Bins")
ax1.set_ylabel("Amount in bin")
ax2.set_ylabel("Amount in bin")
ax3.set_ylabel("Amount in bin")
ax1.set_title("Bins at epoch 0")
ax2.set_title("Bins at half of the epochs")
ax3.set_title("Bins at the last epoch")
layer_keys = [key for key in outputs_obj.digitized.keys() if key[1] == layer_nr]
interval_keys = [layer_keys[0], layer_keys[len(layer_keys)//2], layer_keys[-1]]
ax1.hist([x for y in outputs_obj.digitized[interval_keys[0]][0] for x in y],
mi_obj.bin_edges[interval_keys[0]])
ax2.hist([x for y in outputs_obj.digitized[interval_keys[1]][0] for x in y],
mi_obj.bin_edges[interval_keys[1]])
ax3.hist([x for y in outputs_obj.digitized[interval_keys[2]][0] for x in y],
mi_obj.bin_edges[interval_keys[2]])
if limit == True:
ax1.set_xlim(left=0.1, right=None)
ax2.set_xlim(left=0.1, right=None)
ax3.set_xlim(left=0.1, right=None)
ax1.set_ylim(top= 150)
ax2.set_ylim(top= 150)
ax3.set_ylim(top= 150)
if save_flag == True:
if not os.path.exists("Results/Plots/EmptyBins/"):
try:
os.makedirs("Results/Plots/EmptyBins/")
except OSError as error:
if error.errno != errno.EEXIST:
raise
plt.savefig("Results/Plots/EmptyBins/" + name + "_layer" + str(layer_nr) + "_BinHisto.png")
if show_flag == True:
plt.show()
else:
plt.close()
def plot_empty_bins(name, MI_obj, max_epochs, color_list, show_flag, save_flag, full_flag=True):
"""
plots plot of empty bins in % after binning estimation
name: name of the network
MI_obj: mutual information object
max_epochs: maximum nr of epochs
color_list: list of colours for different layers
show_flag: flag that decides if plot should be displayed
save_flag: flag that decides if plot should be saved
    full_flag: flag that decides if the plot has 1 or 2 axes (1 for the full
               epoch range, 2 for the full epoch range plus a partial view)
"""
print("Plotting empty bins")
if full_flag == False:
fig, (ax1, ax2) = plt.subplots(2, 1, sharey=True)
else:
fig, ax1 = plt.subplots()
fig.set_figheight(10)
fig.set_figwidth(15)
#fig.suptitle(("Empty bins development (" + name + ", score: "+ str(MI_obj.model_score) + ")"))
fig.suptitle(("Empty bins development (score: "+ str(MI_obj.model_score) + ")"), fontsize=15)
ax1.set_xlabel("Epochs")
ax1.set_ylabel("Amount of empty bins in %")
layer_nrs = [key[1] for key in MI_obj.unused_bins.keys()]
max_layer = np.amax(layer_nrs)
activation_name_list = MI_obj.act_func
label_count = 0
for key in MI_obj.unused_bins.keys():
if full_flag == False:
if label_count < len(activation_name_list):
ax1.scatter(key[0], (MI_obj.unused_bins[key]/MI_obj.tot_bins[key])*100,
color=color_list[key[1]], label="l"+str(key[1]+1)+ " " +activation_name_list[key[1]])
ax2.scatter(key[0], (MI_obj.unused_bins[key]/MI_obj.tot_bins[key])*100,
color=color_list[key[1]], label="l"+str(key[1]+1)+ " " +activation_name_list[key[1]])
else:
ax1.scatter(key[0], (MI_obj.unused_bins[key]/MI_obj.tot_bins[key])*100,
color=color_list[key[1]])
ax2.scatter(key[0], (MI_obj.unused_bins[key]/MI_obj.tot_bins[key])*100,
color=color_list[key[1]])
else:
if label_count < len(activation_name_list):
ax1.scatter(key[0], (MI_obj.unused_bins[key]/MI_obj.tot_bins[key])*100,
color=color_list[key[1]], label="l"+str(key[1]+1)+ " " +activation_name_list[key[1]])
else:
ax1.scatter(key[0], (MI_obj.unused_bins[key]/MI_obj.tot_bins[key])*100,
color=color_list[key[1]])
label_count += 1
ax1.legend()
if full_flag == False:
ax2.set_xlim(left=0, right=max_epochs)
ax1.set_xlim(left=0, right=None)
ax1.set_ylim(bottom=0, top=None)
remove_neg_ticks(ax1, "x")
remove_neg_ticks(ax1, "y")
plt.tight_layout()
if save_flag == True:
if not os.path.exists("Results/Plots/EmptyBins/"):
try:
os.makedirs("Results/Plots/EmptyBins/")
except OSError as error:
if error.errno != errno.EEXIST:
raise
plt.savefig("Results/Plots/EmptyBins/" + name + "_EmptyBins.png")
if show_flag == True:
plt.show()
else:
plt.close()
def plot_std_abs_activations(name, activation_obj, color_list, max_epochs,
show_flag, save_flag, full_flag):
"""
plots absolute standard deviation of the activations
name: name of the network
activation_obj: output object that holds the activations
color_list: list of colours for different layers
max_epochs: maximum nr of epochs
show_flag: flag that decides if plot should be displayed
save_flag: flag that decides if plot should be saved
    full_flag: flag that decides if the plot has 1 or 2 axes (1 for the full
               epoch range, 2 for the full epoch range plus a partial view)
"""
print("Creating standard deviation development plot")
if full_flag == False:
fig, (ax1, ax2) = plt.subplots(2, 1, sharey=True)
else:
fig, ax1 = plt.subplots()
fig.set_figheight(10)
fig.set_figwidth(15)
plt.subplots_adjust(
top = 0.94,
wspace = 0.1,
)
#fig.suptitle(("Mean and Standard Deviation (" + name + ", score: "+ str(activation_obj.model_score) + ")"))
fig.suptitle(("Mean and Standard Deviation, score: "+ str(activation_obj.model_score) + ")"), fontsize=15)
ax1.set_xlabel("Epochs")
ax1.set_ylabel("Standard Deviation")
layer_nrs = [key[1] for key in activation_obj.dic.keys()]
max_layer = np.amax(layer_nrs)
activation_name_list = activation_obj.act_func
output_layers_with05_clustered_std = ["sigmoid", "tanh", "softmax"]
label_count = 0
for key in activation_obj.dic.keys():
if full_flag == False:
if (key[1] == max_layer and
activation_name_list[key[1]] in output_layers_with05_clustered_std):
if label_count < len(activation_name_list):
# -.5 offset to concentrate the 0 and 1 cluster at one point after
# taking the absolute value
ax1.scatter(key[0], np.std(abs(activation_obj.dic[key][0]-0.5)),
color=color_list[key[1]], label="l"+str(key[1]+1)+ " " +activation_name_list[key[1]])
ax2.scatter(key[0], np.std(abs(activation_obj.dic[key][0]-0.5)),
color=color_list[key[1]], label="l"+str(key[1]+1)+ " " +activation_name_list[key[1]])
else:
ax1.scatter(key[0], np.std(abs(activation_obj.dic[key][0]-0.5)),
color=color_list[key[1]])
ax2.scatter(key[0], np.std(abs(activation_obj.dic[key][0]-0.5)),
color=color_list[key[1]])
else:
if label_count < len(activation_name_list):
ax1.scatter(key[0], np.std(abs(activation_obj.dic[key][0])),
color=color_list[key[1]], label="l"+str(key[1]+1)+ " " +activation_name_list[key[1]])
ax2.scatter(key[0], np.std(abs(activation_obj.dic[key][0])),
color=color_list[key[1]], label="l"+str(key[1]+1)+ " " +activation_name_list[key[1]])
else:
ax1.scatter(key[0], np.std(abs(activation_obj.dic[key][0])),
color=color_list[key[1]])
ax2.scatter(key[0], np.std(abs(activation_obj.dic[key][0])),
color=color_list[key[1]])
else:
if (key[1] == max_layer and
activation_name_list[key[1]] in output_layers_with05_clustered_std):
if label_count < len(activation_name_list):
# -.5 offset to concentrate the 0 and 1 cluster at one point after
# taking the absolute value
ax1.scatter(key[0], np.std(abs(activation_obj.dic[key][0]-0.5)),
color=color_list[key[1]], label="l"+str(key[1]+1)+ " " +activation_name_list[key[1]])
else:
ax1.scatter(key[0], np.std(abs(activation_obj.dic[key][0]-0.5)),
color=color_list[key[1]])
else:
if label_count < len(activation_name_list):
ax1.scatter(key[0], np.std(abs(activation_obj.dic[key][0])),
color=color_list[key[1]], label="l"+str(key[1]+1)+ " " +activation_name_list[key[1]])
else:
ax1.scatter(key[0], np.std(abs(activation_obj.dic[key][0])),
color=color_list[key[1]])
label_count +=1
ax1.legend()
if full_flag == False:
ax2.set_xlim(left=0, right=max_epochs)
ax1.set_xlim(left=0, right=None)
ax1.set_ylim(bottom=0, top=None)
remove_neg_ticks(ax1, "x")
remove_neg_ticks(ax1, "y")
plt.tight_layout()
if save_flag == True:
if not os.path.exists("Results/Plots/MeanSTD/"):
try:
os.makedirs("Results/Plots/MeanSTD/")
except OSError as error:
if error.errno != errno.EEXIST:
raise
plt.savefig("Results/Plots/MeanSTD/" + name + "_MeanSTD.png")
if show_flag == True:
plt.show()
else:
plt.close()
def remove_neg_ticks(ax, x_or_y):
"""
removes negative ticks of plot axis
    ax: axis where negative ticks should be removed
x_or_y: flag if x or y axis
"""
if x_or_y == "x":
        axticks = [tick for tick in ax.get_xticks() if tick >= 0]
ax.set_xticks(axticks)
if x_or_y == "y":
        axticks = [tick for tick in ax.get_yticks() if tick >= 0]
ax.set_yticks(axticks)
def plot_test_development(test_score_dic, name, show_flag, save_flag):
"""
plots test score development
test_score_dic: dictionary with test scores
name: name of the network
show_flag: flag that decides if plot should be displayed
save_flag: flag that decides if plot should be saved
"""
if not test_score_dic:
print("testscore has not been recorded")
else:
print("creating testscore devolopment plot")
cmap = plt.get_cmap('gnuplot')
last_it = np.amax(list(test_score_dic.keys()))
colors = [cmap(i) for i in np.linspace(0, 1, last_it + 1)]
fig, ax1 = plt.subplots()
fig.set_figheight(10)
fig.set_figwidth(15)
#ax1.set_title(("Test score per epoch (" + name + ", last epoch: " + str(last_it) + ")"))
ax1.set_title(("Test score per epoch (last epoch: " + str(last_it) + ")"), fontsize=15)
ax1.set_xlabel("Epoch")
ax1.set_ylabel("Test Score")
for key in test_score_dic:
ax1.scatter(key, test_score_dic[key], color=colors[key])
#ax1.set_xlim(left=0, right=None)
ax1.set_ylim(bottom=0, top=None)
#ax1.set_xbound(lower=-0.05)
ax1.set_ybound(lower=-0.05)
#remove_neg_ticks(ax1, "x")
remove_neg_ticks(ax1, "y")
#ax1.set_xticks(ax1xticks)
#ax1.set_xticks(ax1xticks)
#fig.tight_layout()
plt.tight_layout()
if save_flag == True:
if not os.path.exists("Results/Plots/Testscore/"):
try:
os.makedirs("Results/Plots/Testscore/")
except OSError as error:
if error.errno != errno.EEXIST:
raise
plt.savefig("Results/Plots/Testscore/" + name + "_testscore.png")
if show_flag == True:
plt.show()
else:
plt.close()
def plot_history(h_object, name, show_flag, save_flag):
"""
    plots training and validation loss development
h_object: keras history object
name: name of the network
show_flag: flag that decides if plot should be displayed
save_flag: flag that decides if plot should be saved
"""
print("creating history plot")
keys = [*h_object.keys()]
last_it = len(h_object["loss"]) - 1
# compares the loss and metric for equality
list_compare = all( math.isclose(e1, e2, abs_tol=0.1) for e1, e2 in
zip(h_object[keys[0]], h_object[keys[1]]))
# if loss and metric are the same only plot 1
if list_compare:
fig, ax1 = plt.subplots(1, 1)
fig.set_figheight(10)
fig.set_figwidth(15)
# summarize history for loss
ax1.plot(h_object['loss'])
ax1.plot(h_object['val_loss'])
#ax1.set_title('Model Loss (' + name + " score: " +
# str(h_object["model_score"]) + ", last epoch: " + str(last_it + 1) + ")" )
ax1.set_title("Model Loss (score: " +
str(h_object["model_score"]) + ", last epoch: " + str(last_it + 1) + ")", fontsize=15)
ax1.set_ylabel('loss')
ax1.set_xlabel('epoch')
ax1.legend(['train', 'validation'], loc='upper left')
ax1.set_xlim(left=0, right=None)
ax1.set_ylim(bottom=0, top=None)
ax1.set_xbound(lower=-0.05)
ax1.set_ybound(lower=-0.05)
remove_neg_ticks(ax1, "x")
remove_neg_ticks(ax1, "y")
#ax1.set_xticks(ax1xticks)
else:
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
fig.set_figheight(10)
fig.set_figwidth(15)
# summarize history for accuracy
ax1.plot(h_object[keys[1]])
ax1.plot(h_object[keys[3]])
        ax1.set_title(('model ' + str(keys[1]) + " (score: " + str(h_object["model_score"]) + ")"))
ax1.set_ylabel(keys[1])
ax1.set_xlabel('epoch')
ax1.legend(['train', 'validation'], loc='upper left')
# summarize history for loss
ax2.plot(h_object['loss'])
ax2.plot(h_object['val_loss'])
ax2.set_title('model loss')
ax2.set_ylabel('loss')
ax2.set_xlabel('epoch')
ax2.legend(['train', 'validation'], loc='upper left')
# #ax1.set_xlim(left=0-0.5, right=None)
ax1.set_ylim(bottom=0, top=None)
# #ax2.set_xlim(left=0-0.5, right = None)
ax2.set_ylim(bottom=0, top=None)
ax1.set_ybound(lower=-0.05)
ax2.set_ybound(lower=-0.05)
remove_neg_ticks(ax1, "y")
remove_neg_ticks(ax2, "y")
#fig.tight_layout()
plt.tight_layout()
if save_flag == True:
if not os.path.exists("Results/Plots/History/"):
try:
os.makedirs("Results/Plots/History/")
except OSError as error:
if error.errno != errno.EEXIST:
raise
plt.savefig("Results/Plots/History/" + name + "_loss.png")
if show_flag == True:
plt.show()
else:
plt.close()
def plot_separate_info_plane_layer_view(MI_object, name, color_l, show_flag, save_flag):
"""
plots information plane separate into different layers
MI_object: mutual information object
name: name of the network
color_l: list of colours for different layers
    show_flag: flag that decides if plot should be displayed
save_flag: flag that decides if plot should be saved
"""
print("creating separate info plane layer view plot")
activations = MI_object.act_func
fig, axes = plt.subplots(len(activations),2,sharex=True,sharey=True)
fig.set_figheight(30)
fig.set_figwidth(15)
plt.subplots_adjust(
top = 0.97,
wspace = 0.05,
)
#fig.suptitle(("Information Plane (" + name + ", score: " + str(MI_object.model_score) + ")"))
fig.suptitle(("Information Plane (score: " + str(MI_object.model_score) + ")"), fontsize=15)
color_list = color_l
cmap = plt.get_cmap('gnuplot')
last_it = np.amax(list(MI_object.mi_x.keys()))
colors = [cmap(i) for i in np.linspace(0, 1, last_it + 1)]
# controls start and stop sign
label_count = 0
sp_label_count = 0
for key in MI_object.mi_x.keys():
# epochview
axes[key[1],1].plot(MI_object.mi_x[key], MI_object.mi_y[key],marker="o",
markersize=9, linewidth=0.2, color=colors[key[0]])
# layerview
if key[0] == 0:
if sp_label_count == 0:
axes[key[1],0].scatter(MI_object.mi_x[key], MI_object.mi_y[key],
color='black', linewidth=5, label='start')
sp_label_count += 1
else:
axes[key[1],0].scatter(MI_object.mi_x[key], MI_object.mi_y[key],
color='black', linewidth=5)
elif key[0] == list(MI_object.mi_x.keys())[-1][0]:
if sp_label_count == 1:
axes[key[1],0].scatter(MI_object.mi_x[key], MI_object.mi_y[key],
color='black', linewidth=5, marker="v", label='end')
sp_label_count += 1
else:
axes[key[1],0].scatter(MI_object.mi_x[key], MI_object.mi_y[key],
color='black', linewidth=5, marker="v")
else:
if label_count < len(activations):
axes[key[1],0].scatter(MI_object.mi_x[key],
MI_object.mi_y[key], color=color_list[key[1]], label="l"+str(key[1]+1)+ " " +activations[key[1]])
label_count += 1
else:
axes[key[1],0].scatter(MI_object.mi_x[key],
MI_object.mi_y[key], color=color_list[key[1]])
# unify axes to start from 0
for i in range(len(activations)):
for j in range(2):
axes[i,j].set_xlabel("I(X;T)")
axes[i,j].set_ylabel("I(T;Y)")
axes[i,j].set_xlim(left=0, right=None)
axes[i,j].set_ylim(bottom=0, top=None)
#axes[i,j].set_xbound(lower=-0.05)
#axes[i,j].set_ybound(lower=-0.05)
remove_neg_ticks(axes[i,j], "x")
remove_neg_ticks(axes[i,j], "y")
#fig.tight_layout()
plt.tight_layout()
if save_flag == True:
if not os.path.exists("Results/Plots/LayerviewSplit/"):
try:
os.makedirs("Results/Plots/LayerviewSplit/")
except OSError as error:
if error.errno != errno.EEXIST:
raise
plt.savefig("Results/Plots/LayerviewSplit/" + name + "_layerviewsplit.png")
if show_flag == True:
plt.show()
else:
plt.close()
def plot_info_plane_layer_view(MI_object, name, color_l, show_flag, save_flag):
"""
plots information plane in a layer view
MI_object: mutual information object
name: name of the network
color_l: list of colours for different layers
    show_flag: flag that decides if plot should be displayed
save_flag: flag that decides if plot should be saved
"""
print("creating info plane layer view plot")
fig, ax = plt.subplots()
fig.set_figheight(10)
fig.set_figwidth(15)
#ax.set_title(("Information Plane (" + name + ", score: " + str(MI_object.model_score) + ")"))
ax.set_title(("Information Plane (score: " + str(MI_object.model_score) + ")"), fontsize=15)
ax.set_xlabel("I(X;T)")
ax.set_ylabel("I(T;Y)")
color_list = color_l
activations = MI_object.act_func
label_count = 0
sp_label_count = 0
for key in MI_object.mi_x.keys():
if key[0] == 0:
if sp_label_count == 0:
ax.scatter(MI_object.mi_x[key], MI_object.mi_y[key],
color='black', linewidth=5, label='start')
sp_label_count += 1
else:
ax.scatter(MI_object.mi_x[key], MI_object.mi_y[key],
color='black', linewidth=5)
elif key[0] == list(MI_object.mi_x.keys())[-1][0]:
if sp_label_count == 1:
ax.scatter(MI_object.mi_x[key], MI_object.mi_y[key],
color='black', linewidth=5, marker="v", label='end')
sp_label_count += 1
else:
ax.scatter(MI_object.mi_x[key], MI_object.mi_y[key],
color='black', linewidth=5, marker="v")
else:
if label_count < len(activations):
ax.scatter(MI_object.mi_x[key],
MI_object.mi_y[key], color=color_list[key[1]], label="l"+str(key[1]+1)+ " " +activations[key[1]])
label_count += 1
else:
ax.scatter(MI_object.mi_x[key],
MI_object.mi_y[key], color=color_list[key[1]])
ax.legend()
ax.set_xlim(left=0, right=None)
ax.set_ylim(bottom=0, top=None)
ax.set_xbound(lower=-0.05)
ax.set_ybound(lower=-0.05)
remove_neg_ticks(ax, "x")
remove_neg_ticks(ax, "y")
#fig.tight_layout()
plt.tight_layout()
if save_flag == True:
if not os.path.exists("Results/Plots/Layerview/"):
try:
os.makedirs("Results/Plots/Layerview/")
except OSError as error:
if error.errno != errno.EEXIST:
raise
plt.savefig("Results/Plots/Layerview/" + name + "_layerview.png")
if show_flag == True:
plt.show()
else:
plt.close()
def plot_info_plane_epoch_view(MI_object, name, show_flag, save_flag):
"""
plots information plane in an epoch view
MI_object: mutual information object
name: name of the network
    show_flag: flag that decides if plot should be displayed
save_flag: flag that decides if plot should be saved
"""
print("creating info plane epoch view plot")
fig, ax = plt.subplots()
fig.set_figheight(10)
fig.set_figwidth(15)
#ax.set_title(("Information Plane (" + name + ", score: "+ str(MI_object.model_score) + ")"))
ax.set_title(("Information Plane (score: "+ str(MI_object.model_score) + ")"), fontsize=15)
ax.set_xlabel("I(X;T)")
ax.set_ylabel("I(T;Y)")
activations = MI_object.act_func
cmap = plt.get_cmap('gnuplot')
last_it = np.amax(list(MI_object.mi_x.keys()))
colors = [cmap(i) for i in np.linspace(0, 1, last_it + 1)]
mi_x_list = []
mi_y_list = []
act_count = 0
for key in MI_object.mi_x.keys():
if act_count < len(activations):
mi_x_list.append(MI_object.mi_x[key])
mi_y_list.append(MI_object.mi_y[key])
act_count += 1
if act_count == len(activations):
c = colors[key[0]]
ax.plot(mi_x_list, mi_y_list, marker="o",
markersize=9, linewidth=0.2, color=c)
act_count = 0
mi_x_list = []
mi_y_list = []
ax.set_xlim(left=0, right=None)
ax.set_ylim(bottom=0, top=None)
ax.set_xbound(lower=-0.05)
ax.set_ybound(lower=-0.05)
remove_neg_ticks(ax, "x")
remove_neg_ticks(ax, "y")
#fig.tight_layout()
plt.tight_layout()
if save_flag == True:
if not os.path.exists("Results/Plots/Epochview/"):
try:
os.makedirs("Results/Plots/Epochview/")
except OSError as error:
if error.errno != errno.EEXIST:
raise
plt.savefig("Results/Plots/Epochview/" + name + "_epochview.png")
if show_flag == True:
plt.show()
else:
plt.close()
def plot_info_plane_combination_view(MI_object, name, color_l, show_flag, save_flag):
"""
plots information plane in a combination of epoch and layer view
MI_object: mutual information object
name: name of the network
color_l: list of colours for different layers
    show_flag: flag that decides if plot should be displayed
save_flag: flag that decides if plot should be saved
"""
print("Creating combinationview plot")
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
fig.set_figheight(10)
fig.set_figwidth(24)
plt.subplots_adjust(
top = 0.94,
wspace = 0.05,
)
#fig.suptitle(("Information Plane (" + name + ", score: "+ str(MI_object.model_score) + ")"))
fig.suptitle(("Information Plane (score: "+ str(MI_object.model_score) + ")"), fontsize=15)
ax1.set_xlabel("I(X;T)")
ax1.set_ylabel("I(T;Y)")
ax2.set_xlabel("I(X;T)")
ax2.set_ylabel("I(T;Y)")
activations = MI_object.act_func
cmap = plt.get_cmap('gnuplot')
last_it = np.amax(list(MI_object.mi_x.keys()))
colors = [cmap(i) for i in np.linspace(0, 1, last_it + 1)]
color_list = color_l
activations = MI_object.act_func
mi_x_list = []
mi_y_list = []
label_count = 0
sp_label_count = 0
act_count = 0
for key in MI_object.mi_x.keys():
#epochview
if act_count < len(activations):
mi_x_list.append(MI_object.mi_x[key])
mi_y_list.append(MI_object.mi_y[key])
act_count += 1
if act_count == len(activations):
c = colors[key[0]]
ax1.plot(mi_x_list, mi_y_list, marker="o",
markersize=9, linewidth=0.2, color=c)
act_count = 0
mi_x_list = []
mi_y_list = []
# layerview
if key[0] == 0:
if sp_label_count == 0:
ax2.scatter(MI_object.mi_x[key], MI_object.mi_y[key],
color='black', linewidth=5, label='start')
sp_label_count += 1
else:
ax2.scatter(MI_object.mi_x[key], MI_object.mi_y[key],
color='black', linewidth=5)
elif key[0] == list(MI_object.mi_x.keys())[-1][0]:
if sp_label_count == 1:
ax2.scatter(MI_object.mi_x[key], MI_object.mi_y[key],
color='black', linewidth=5, marker="v", label='end')
sp_label_count += 1
else:
ax2.scatter(MI_object.mi_x[key], MI_object.mi_y[key],
color='black', linewidth=5, marker="v")
else:
if label_count < len(activations):
ax2.scatter(MI_object.mi_x[key],
MI_object.mi_y[key], color=color_list[key[1]], label="l"+str(key[1]+1)+ " " +activations[key[1]])
label_count += 1
else:
ax2.scatter(MI_object.mi_x[key],
MI_object.mi_y[key], color=color_list[key[1]])
ax2.legend()
ax1.set_xlim(left=0, right=None)
ax1.set_ylim(bottom=0, top=None)
ax2.set_xlim(left=0, right = None)
ax2.set_ylim(bottom=0, top=None)
ax1.set_xbound(lower=-0.05)
ax2.set_xbound(lower=-0.05)
ax1.set_ybound(lower=-0.05)
ax2.set_ybound(lower=-0.05)
remove_neg_ticks(ax1, "x")
remove_neg_ticks(ax1, "y")
remove_neg_ticks(ax2, "x")
remove_neg_ticks(ax2, "y")
#fig.tight_layout()
plt.tight_layout()
if save_flag == True:
if not os.path.exists("Results/Plots/Combinationview/"):
try:
os.makedirs("Results/Plots/Combinationview/")
except OSError as error:
if error.errno != errno.EEXIST:
raise
plt.savefig("Results/Plots/Combinationview/" + name + "_combinationview.png")
if show_flag == True:
plt.show()
else:
plt.close()
def plot_info_plane(MI_object, name, separate_flag, color_l, show_flag, save_flag):
"""
starts information plane plotting and creates plots for epoch, layer and separated view
MI_object: mutual information object
name: name of the network
    separate_flag: flag that decides if the separated layer view is also plotted
    color_l: list of colours for different layers
    show_flag: flag that decides if plot should be displayed
save_flag: flag that decides if plot should be saved
"""
fontsize = "15"
params = { #'figure.autolayout':True,
'legend.fontsize': "12",
'axes.labelsize': fontsize,
'axes.titlesize': fontsize,
        'xtick.labelsize': fontsize,
        'ytick.labelsize': fontsize}
plt.rcParams.update(params)
plot_info_plane_layer_view(MI_object, name, color_l, show_flag, save_flag)
plot_info_plane_epoch_view(MI_object, name, show_flag, save_flag)
plot_info_plane_combination_view(MI_object, name, color_l, show_flag, save_flag)
if separate_flag == True:
plot_separate_info_plane_layer_view(MI_object, name, color_l, show_flag, save_flag)
| 38.833123
| 131
| 0.57128
| 4,229
| 30,717
| 3.959328
| 0.069993
| 0.037745
| 0.031056
| 0.020366
| 0.828655
| 0.779384
| 0.753106
| 0.732083
| 0.708851
| 0.69822
| 0
| 0.02789
| 0.301983
| 30,717
| 791
| 132
| 38.833123
| 0.753043
| 0.164599
| 0
| 0.701818
| 0
| 0
| 0.08178
| 0.026533
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.010909
| 0
| 0.030909
| 0.016364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e3abd1f9fda64fd66e1968b9b4f95437cb7b1e89
| 13,742
|
py
|
Python
|
typings/bl_ui/space_info.py
|
Argmaster/PyR3
|
6786bcb6a101fe4bd4cc50fe43767b8178504b15
|
[
"MIT"
] | 2
|
2021-12-12T18:51:52.000Z
|
2022-02-23T09:49:16.000Z
|
src/blender/blender_autocomplete-master/2.92/bl_ui/space_info.py
|
JonasWard/ClayAdventures
|
a716445ac690e4792e70658319aa1d5299f9c9e9
|
[
"MIT"
] | 2
|
2021-11-08T12:09:02.000Z
|
2021-12-12T23:01:12.000Z
|
typings/bl_ui/space_info.py
|
Argmaster/PyR3
|
6786bcb6a101fe4bd4cc50fe43767b8178504b15
|
[
"MIT"
] | null | null | null |
import sys
import typing
import bpy_types
class INFO_HT_header(bpy_types.Header, bpy_types._GenericUI):
bl_rna = None
''' '''
bl_space_type = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class INFO_MT_area(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class INFO_MT_context_menu(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class INFO_MT_editor_menus(bpy_types.Menu, bpy_types._GenericUI):
bl_idname = None
''' '''
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class INFO_MT_info(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
class INFO_MT_view(bpy_types.Menu, bpy_types._GenericUI):
bl_label = None
''' '''
bl_rna = None
''' '''
id_data = None
''' '''
def append(self, draw_func):
'''
'''
pass
def as_pointer(self):
'''
'''
pass
def bl_rna_get_subclass(self):
'''
'''
pass
def bl_rna_get_subclass_py(self):
'''
'''
pass
def draw(self, _context):
'''
'''
pass
def draw_collapsible(self, context, layout):
'''
'''
pass
def draw_preset(self, _context):
'''
'''
pass
def driver_add(self):
'''
'''
pass
def driver_remove(self):
'''
'''
pass
def get(self):
'''
'''
pass
def is_extended(self):
'''
'''
pass
def is_property_hidden(self):
'''
'''
pass
def is_property_overridable_library(self):
'''
'''
pass
def is_property_readonly(self):
'''
'''
pass
def is_property_set(self):
'''
'''
pass
def items(self):
'''
'''
pass
def keyframe_delete(self):
'''
'''
pass
def keyframe_insert(self):
'''
'''
pass
def keys(self):
'''
'''
pass
def path_from_id(self):
'''
'''
pass
def path_menu(self, searchpaths, operator, props_default, prop_filepath,
filter_ext, filter_path, display_name, add_operator):
'''
'''
pass
def path_resolve(self):
'''
'''
pass
def pop(self):
'''
'''
pass
def prepend(self, draw_func):
'''
'''
pass
def property_overridable_library_set(self):
'''
'''
pass
def property_unset(self):
'''
'''
pass
def remove(self, draw_func):
'''
'''
pass
def type_recast(self):
'''
'''
pass
def values(self):
'''
'''
pass
| 12.447464
| 76
| 0.388153
| 1,133
| 13,742
| 4.438658
| 0.065313
| 0.229668
| 0.275602
| 0.07755
| 0.97077
| 0.97077
| 0.97077
| 0.964605
| 0.964605
| 0.964605
| 0
| 0
| 0.481808
| 13,742
| 1,103
| 77
| 12.458749
| 0.706221
| 0
| 0
| 0.970667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.456
| false
| 0.456
| 0.008
| 0
| 0.530667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 11
|
e3c638a6f8f396806ce798ab4fb74b67d73b3a29
| 13,018
|
py
|
Python
|
tools/tests/test_acetz.py
|
facchinm/AceTime
|
df0e05995899cc5653ec583dbee737f53ad588ea
|
[
"MIT"
] | null | null | null |
tools/tests/test_acetz.py
|
facchinm/AceTime
|
df0e05995899cc5653ec583dbee737f53ad588ea
|
[
"MIT"
] | null | null | null |
tools/tests/test_acetz.py
|
facchinm/AceTime
|
df0e05995899cc5653ec583dbee737f53ad588ea
|
[
"MIT"
] | null | null | null |
import sys
import unittest
import logging
from datetime import datetime, timedelta, timezone
from data_types.at_types import SECONDS_SINCE_UNIX_EPOCH
from acetz import gettz as agettz, acetz
# Enable logging during unittests.
# https://stackoverflow.com/questions/7472863
logger = logging.getLogger()
logger.level = logging.DEBUG
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
def print_zs_at_dt(tz: acetz, dt: datetime) -> None:
zs = tz.zone_specifier()
zs.init_for_year(dt.year)
zs.print_matches_and_transitions()
unix_seconds = int(dt.timestamp())
epoch_seconds = unix_seconds - SECONDS_SINCE_UNIX_EPOCH
info = zs.get_timezone_info_for_seconds(epoch_seconds)
if info:
print(
f"print_zs_at_dt(): epoch_seconds={epoch_seconds} "
f"total_offset={info.total_offset} "
f"utc_offset={info.utc_offset} "
f"dst_offset={info.dst_offset} "
f"abbrev={info.abbrev} "
f"fold={info.fold}"
)
else:
print(
f"print_zs_at_dt(): epoch_seconds={epoch_seconds} "
" transition not found"
)
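# Example use of the debugging helper above (a sketch; the zone name is only
# illustrative):
#   tz = agettz('America/Los_Angeles')
#   print_zs_at_dt(tz, datetime(2000, 4, 2, 10, 0, 0, tzinfo=timezone.utc))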
# @unittest.skip
class TestLosAngeles(unittest.TestCase):
def test_constructor(self) -> None:
atz = agettz('America/Los_Angeles')
adt = datetime(2000, 1, 2, 3, 4, 5, tzinfo=atz)
self.assertEqual(2000, adt.year)
self.assertEqual(1, adt.month)
self.assertEqual(2, adt.day)
self.assertEqual(3, adt.hour)
self.assertEqual(4, adt.minute)
self.assertEqual(5, adt.second)
# date +%s -d '2000-01-02T03:04:05-08:00'
self.assertEqual(946811045, int(adt.timestamp()))
adt_utcoffset = adt.utcoffset()
assert(adt_utcoffset is not None)
self.assertEqual(-8 * 3600, adt_utcoffset.total_seconds())
assert(adt.tzinfo is not None)
self.assertEqual("PST", adt.tzinfo.tzname(adt))
def test_before_spring_forward(self) -> None:
tz = agettz('America/Los_Angeles')
# One second before DST shift, 01:59:59 UTC-8
epoch_seconds = 7984799
unix_seconds = epoch_seconds + SECONDS_SINCE_UNIX_EPOCH
dtu = datetime.fromtimestamp(unix_seconds, tz=timezone.utc)
# print_zs_at_dt(tz, dtu)
# Date from epoch seconds.
dtt = dtu.astimezone(tz)
self.assertEqual(
epoch_seconds,
int(dtt.timestamp()) - SECONDS_SINCE_UNIX_EPOCH
)
self.assertEqual(2000, dtt.year)
self.assertEqual(4, dtt.month)
self.assertEqual(2, dtt.day)
self.assertEqual(1, dtt.hour)
self.assertEqual(59, dtt.minute)
self.assertEqual(59, dtt.second)
self.assertEqual("PST", dtt.tzname())
self.assertEqual(timedelta(hours=-8), dtt.utcoffset())
self.assertEqual(timedelta(hours=0), dtt.dst())
# Date from component
dtc = datetime(2000, 4, 2, 1, 59, 59, tzinfo=tz)
self.assertEqual(unix_seconds, int(dtc.timestamp()))
self.assertEqual(2000, dtc.year)
self.assertEqual(4, dtc.month)
self.assertEqual(2, dtc.day)
self.assertEqual(1, dtc.hour)
self.assertEqual(59, dtc.minute)
self.assertEqual(59, dtc.second)
self.assertEqual("PST", dtc.tzname())
self.assertEqual(timedelta(hours=-8), dtc.utcoffset())
self.assertEqual(timedelta(hours=0), dtc.dst())
self.assertEqual(dtc, dtt)
def test_after_spring_forward(self) -> None:
tz = agettz('America/Los_Angeles')
# Right after DST forward shift, 03:00:00 UTC-7
epoch_seconds = 7984800
unix_seconds = epoch_seconds + SECONDS_SINCE_UNIX_EPOCH
dtu = datetime.fromtimestamp(unix_seconds, tz=timezone.utc)
# Date from epoch seconds
dtt = dtu.astimezone(tz)
self.assertEqual(unix_seconds, int(dtt.timestamp()))
self.assertEqual(2000, dtt.year)
self.assertEqual(4, dtt.month)
self.assertEqual(2, dtt.day)
self.assertEqual(3, dtt.hour)
self.assertEqual(0, dtt.minute)
self.assertEqual(0, dtt.second)
self.assertEqual("PDT", dtt.tzname())
self.assertEqual(timedelta(hours=-7), dtt.utcoffset())
self.assertEqual(timedelta(hours=1), dtt.dst())
# Date from component
dtc = datetime(2000, 4, 2, 3, 0, 0, tzinfo=tz)
self.assertEqual(unix_seconds, int(dtc.timestamp()))
self.assertEqual(2000, dtc.year)
self.assertEqual(4, dtc.month)
self.assertEqual(2, dtc.day)
self.assertEqual(3, dtc.hour)
self.assertEqual(0, dtc.minute)
self.assertEqual(0, dtc.second)
self.assertEqual("PDT", dtc.tzname())
self.assertEqual(timedelta(hours=-7), dtc.utcoffset())
self.assertEqual(timedelta(hours=1), dtc.dst())
self.assertEqual(dtc, dtt)
def test_before_fall_back(self) -> None:
tz = agettz('America/Los_Angeles')
# One second before DST shift, 01:59:59 UTC-7
epoch_seconds = 26125199
unix_seconds = epoch_seconds + SECONDS_SINCE_UNIX_EPOCH
dtu = datetime.fromtimestamp(unix_seconds, tz=timezone.utc)
# Date from epoch seconds. By default, should match the 1st transition.
dtt = dtu.astimezone(tz)
self.assertEqual(
epoch_seconds,
int(dtt.timestamp()) - SECONDS_SINCE_UNIX_EPOCH
)
self.assertEqual(2000, dtt.year)
self.assertEqual(10, dtt.month)
self.assertEqual(29, dtt.day)
self.assertEqual(1, dtt.hour)
self.assertEqual(59, dtt.minute)
self.assertEqual(59, dtt.second)
self.assertEqual("PDT", dtt.tzname())
self.assertEqual(timedelta(hours=-7), dtt.utcoffset())
self.assertEqual(timedelta(hours=1), dtt.dst())
# Date from component. With fold=0, should match the 1st transition.
dtc = datetime(2000, 10, 29, 1, 59, 59, tzinfo=tz)
self.assertEqual(unix_seconds, int(dtc.timestamp()))
self.assertEqual(2000, dtc.year)
self.assertEqual(10, dtc.month)
self.assertEqual(29, dtc.day)
self.assertEqual(1, dtc.hour)
self.assertEqual(59, dtc.minute)
self.assertEqual(59, dtc.second)
self.assertEqual("PDT", dtc.tzname())
self.assertEqual(timedelta(hours=-7), dtc.utcoffset())
self.assertEqual(timedelta(hours=1), dtc.dst())
# Test the second transition with fold=1
dtc = datetime(2000, 10, 29, 1, 59, 59, tzinfo=tz, fold=1)
self.assertEqual(unix_seconds + 3600, int(dtc.timestamp()))
self.assertEqual(2000, dtc.year)
self.assertEqual(10, dtc.month)
self.assertEqual(29, dtc.day)
self.assertEqual(1, dtc.hour)
self.assertEqual(59, dtc.minute)
self.assertEqual(59, dtc.second)
self.assertEqual("PST", dtc.tzname())
self.assertEqual(timedelta(hours=-8), dtc.utcoffset())
self.assertEqual(timedelta(hours=0), dtc.dst())
def test_after_fall_back(self) -> None:
tz = agettz('America/Los_Angeles')
# Just after DST fall back 01:00:00 UTC-8
epoch_seconds = 26125200
unix_seconds = epoch_seconds + SECONDS_SINCE_UNIX_EPOCH
dtu = datetime.fromtimestamp(unix_seconds, tz=timezone.utc)
# Date from epoch seconds.
dtt = dtu.astimezone(tz)
self.assertEqual(
epoch_seconds,
int(dtt.timestamp()) - SECONDS_SINCE_UNIX_EPOCH
)
self.assertEqual(2000, dtt.year)
self.assertEqual(10, dtt.month)
self.assertEqual(29, dtt.day)
self.assertEqual(1, dtt.hour)
self.assertEqual(0, dtt.minute)
self.assertEqual(0, dtt.second)
self.assertEqual("PST", dtt.tzname())
self.assertEqual(timedelta(hours=-8), dtt.utcoffset())
self.assertEqual(timedelta(hours=0), dtt.dst())
# Date from component
dtc = datetime(2000, 10, 29, 1, 0, 0, tzinfo=tz, fold=1)
self.assertEqual(unix_seconds, int(dtc.timestamp()))
self.assertEqual(2000, dtc.year)
self.assertEqual(10, dtc.month)
self.assertEqual(29, dtc.day)
self.assertEqual(1, dtc.hour)
self.assertEqual(0, dtc.minute)
self.assertEqual(0, dtc.second)
self.assertEqual("PST", dtc.tzname())
self.assertEqual(timedelta(hours=-8), dtc.utcoffset())
self.assertEqual(timedelta(hours=0), dtc.dst())
self.assertEqual(dtc, dtt)
def test_way_after_fall_back(self) -> None:
tz = agettz('America/Los_Angeles')
# One hour after DST fall back, 02:00:00 UTC-8
epoch_seconds = 26125200 + 3600
unix_seconds = epoch_seconds + SECONDS_SINCE_UNIX_EPOCH
dtu = datetime.fromtimestamp(unix_seconds, tz=timezone.utc)
# Date from epoch seconds.
dtt = dtu.astimezone(tz)
self.assertEqual(unix_seconds, int(dtt.timestamp()))
self.assertEqual(2000, dtt.year)
self.assertEqual(10, dtt.month)
self.assertEqual(29, dtt.day)
self.assertEqual(2, dtt.hour)
self.assertEqual(0, dtt.minute)
self.assertEqual(0, dtt.second)
self.assertEqual("PST", dtt.tzname())
self.assertEqual(timedelta(hours=-8), dtt.utcoffset())
self.assertEqual(timedelta(hours=0), dtt.dst())
# Date from component
dtc = datetime(2000, 10, 29, 2, 0, 0, tzinfo=tz)
self.assertEqual(unix_seconds, int(dtc.timestamp()))
self.assertEqual(2000, dtc.year)
self.assertEqual(10, dtc.month)
self.assertEqual(29, dtc.day)
self.assertEqual(2, dtc.hour)
self.assertEqual(0, dtc.minute)
self.assertEqual(0, dtc.second)
self.assertEqual("PST", dtc.tzname())
self.assertEqual(timedelta(hours=-8), dtc.utcoffset())
self.assertEqual(timedelta(hours=0), dtc.dst())
self.assertEqual(dtc, dtt)
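# Hedged aside (not part of the original suite): a minimal sketch of how
# PEP 495 `fold` disambiguates the repeated hour exercised above, using the
# stdlib zoneinfo instead of this module's agettz (assumes Python 3.9+ with
# tz data available).
def _fold_demo():
    from datetime import datetime
    from zoneinfo import ZoneInfo
    tz = ZoneInfo('America/Los_Angeles')
    first = datetime(2000, 10, 29, 1, 30, 0, tzinfo=tz)           # fold=0 -> PDT
    second = datetime(2000, 10, 29, 1, 30, 0, tzinfo=tz, fold=1)  # fold=1 -> PST
    # Same wall-clock reading, one hour apart in absolute time.
    assert second.timestamp() - first.timestamp() == 3600.0
    return first.tzname(), second.tzname()  # ('PDT', 'PST')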
# @unittest.skip
class TestTunis(unittest.TestCase):
def test_2006_01_01(self) -> None:
tz = agettz('Africa/Tunis')
epoch_seconds = 189385200
unix_seconds = epoch_seconds + SECONDS_SINCE_UNIX_EPOCH
dtu = datetime.fromtimestamp(unix_seconds, tz=timezone.utc)
# print_zs_at_dt(tz, dtu)
# Date from epoch seconds.
dtt = dtu.astimezone(tz)
self.assertEqual(
epoch_seconds,
int(dtt.timestamp()) - SECONDS_SINCE_UNIX_EPOCH
)
self.assertEqual(2006, dtt.year)
self.assertEqual(1, dtt.month)
self.assertEqual(1, dtt.day)
self.assertEqual(0, dtt.hour)
self.assertEqual(0, dtt.minute)
self.assertEqual(0, dtt.second)
self.assertEqual("CET", dtt.tzname())
self.assertEqual(timedelta(hours=1), dtt.utcoffset())
self.assertEqual(timedelta(hours=0), dtt.dst())
# Date from component
dtc = datetime(2006, 1, 1, 0, 0, 0, tzinfo=tz)
self.assertEqual(unix_seconds, int(dtc.timestamp()))
self.assertEqual(2006, dtc.year)
self.assertEqual(1, dtc.month)
self.assertEqual(1, dtc.day)
self.assertEqual(0, dtc.hour)
self.assertEqual(0, dtc.minute)
self.assertEqual(0, dtc.second)
self.assertEqual("CET", dtc.tzname())
self.assertEqual(timedelta(hours=1), dtc.utcoffset())
self.assertEqual(timedelta(hours=0), dtc.dst())
self.assertEqual(dtc, dtt)
class TestSydney(unittest.TestCase):
def test_2000_03_26_after_fall_back(self) -> None:
tz = agettz('Australia/Sydney')
epoch_seconds = 7315200
unix_seconds = epoch_seconds + SECONDS_SINCE_UNIX_EPOCH
dtu = datetime.fromtimestamp(unix_seconds, tz=timezone.utc)
# print_zs_at_dt(tz, dtu)
# Date from epoch seconds.
dtt = dtu.astimezone(tz)
self.assertEqual(
epoch_seconds,
int(dtt.timestamp()) - SECONDS_SINCE_UNIX_EPOCH
)
self.assertEqual(2000, dtt.year)
self.assertEqual(3, dtt.month)
self.assertEqual(26, dtt.day)
self.assertEqual(2, dtt.hour)
self.assertEqual(0, dtt.minute)
self.assertEqual(0, dtt.second)
self.assertEqual("AEST", dtt.tzname())
self.assertEqual(timedelta(hours=10), dtt.utcoffset())
self.assertEqual(timedelta(hours=0), dtt.dst())
# Date from component
dtc = datetime(2000, 3, 26, 2, 0, 0, tzinfo=tz, fold=1)
self.assertEqual(unix_seconds, int(dtc.timestamp()))
self.assertEqual(2000, dtc.year)
self.assertEqual(3, dtc.month)
self.assertEqual(26, dtc.day)
self.assertEqual(2, dtc.hour)
self.assertEqual(0, dtc.minute)
self.assertEqual(0, dtc.second)
self.assertEqual("AEST", dtc.tzname())
self.assertEqual(timedelta(hours=10), dtc.utcoffset())
self.assertEqual(timedelta(hours=0), dtc.dst())
self.assertEqual(dtc, dtt)
| 36.878187
| 79
| 0.633738
| 1,625
| 13,018
| 4.974154
| 0.096615
| 0.306198
| 0.089076
| 0.107633
| 0.791414
| 0.773475
| 0.753804
| 0.743288
| 0.735989
| 0.7277
| 0
| 0.046316
| 0.242049
| 13,018
| 352
| 80
| 36.982955
| 0.772879
| 0.067368
| 0
| 0.640741
| 0
| 0
| 0.03608
| 0.012054
| 0
| 0
| 0
| 0
| 0.618519
| 1
| 0.033333
| false
| 0
| 0.022222
| 0
| 0.066667
| 0.022222
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
58133f9def6986ff00500363dd11226e2c56889d
| 7,734
|
py
|
Python
|
autoreg/data_streamers.py
|
zhenwendai/RGP
|
be679607d3457a1038a2fe39b36b816ea380ea39
|
[
"BSD-3-Clause"
] | 17
|
2016-10-24T01:31:30.000Z
|
2021-07-31T08:12:02.000Z
|
autoreg/data_streamers.py
|
zhenwendai/RGP
|
be679607d3457a1038a2fe39b36b816ea380ea39
|
[
"BSD-3-Clause"
] | null | null | null |
autoreg/data_streamers.py
|
zhenwendai/RGP
|
be679607d3457a1038a2fe39b36b816ea380ea39
|
[
"BSD-3-Clause"
] | 11
|
2017-07-11T09:11:48.000Z
|
2022-01-25T12:10:48.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import abc
import numpy as np
import warnings
class DataStreamerTemplate(object):
__metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def next_minibatch(self):
        """Return (minibatch_index, indices, Y_batch, X_batch) for the next minibatch."""
        return None

    @abc.abstractproperty
    def get_cur_index(self):
        """Return the current minibatch index and the indices of its samples."""

    @abc.abstractproperty
    def minibatch_size(self):
        """Return the number of sequences per minibatch."""
        return None

    @abc.abstractproperty
    def total_size(self):
        """Return the total number of sequences in the dataset."""
        return None
class TrivialDataStreamer(DataStreamerTemplate):
"""
This trivial data_streamer returns all the data each iteration.
"""
    def __init__(self, Y, X):
        """Store the full dataset; every call to next_minibatch returns all of it."""
        # Wrap bare arrays in a one-element list so the streamer always sees a list of sequences.
        if Y is not None and isinstance(Y, np.ndarray):
            Y = [Y, ]
        if X is not None and isinstance(X, np.ndarray):
            X = [X, ]
assert len(Y) == len(X), "Input and output size must match"
self.iterations_started = False
        self.minibatch_size = len(Y)
self.next_minibatch_start_idx = 0
self.minibatch_index = 0
self.last_in_epoch = False
self.Y = Y
self.X = X
self.total_size = len(Y)
    def next_minibatch(self):
        """Return the whole dataset as a single minibatch."""
self.iterations_started = True
self.minibatch_index += 1
st_idx = self.next_minibatch_start_idx
if (self.next_minibatch_start_idx + self.minibatch_size) < self.total_size:
end_idx = self.next_minibatch_start_idx + self.minibatch_size
self.last_in_epoch = False
else:
end_idx = np.min( (self.next_minibatch_start_idx + self.minibatch_size, self.total_size) )
self.last_in_epoch = True
Y_out = self.Y[st_idx:end_idx]
X_out = self.X[st_idx:end_idx]
minibatch_index_out = self.minibatch_index
if self.last_in_epoch:
self.next_minibatch_start_idx = 0
self.minibatch_index = 0
return minibatch_index_out, range(len(self.Y)), Y_out, X_out
    def get_cur_index(self):
        """Return the current minibatch index and the indices of its samples."""
        return self.minibatch_index, range(self.minibatch_size)

    def minibatch_size(self):
        """Return the minibatch size (shadowed by the instance attribute of the same name)."""
        return self.minibatch_size

    def total_size(self):
        """Return the total number of sequences (shadowed by the instance attribute of the same name)."""
        return self.total_size

    def minibatch_last_in_epoch(self):
        """Return True if the most recent minibatch was the last one of its epoch."""
        return self.last_in_epoch
class RandomPermutationDataStreamer(DataStreamerTemplate):
"""
    This data_streamer returns a random permutation of the whole dataset at each iteration.
"""
    def __init__(self, Y, X):
        """Store the dataset; each minibatch covers the whole dataset, permuted anew."""
if Y is not None and isinstance(Y, np.ndarray):
Y = [Y,]
warnings.warn("Input has only one sequence. No permutation functionality will be used.", RuntimeWarning)
if X is not None and isinstance(X, np.ndarray):
X = [X,]
assert len(Y) == len(X), "Input and output size must match"
self.iterations_started = False
        self.minibatch_size = len(Y)
self.next_minibatch_start_idx = 0
self.minibatch_index = 0
self.previous_indexes_out = None
self.last_in_epoch = False
self.Y = Y
self.X = X
self.total_size = len(Y)
    def next_minibatch(self):
        """Return the whole dataset as one minibatch, in a fresh random order."""
        import random  # local import kept from the original
self.iterations_started = True
self.minibatch_index += 1
st_idx = self.next_minibatch_start_idx
if (self.next_minibatch_start_idx + self.minibatch_size) < self.total_size:
end_idx = self.next_minibatch_start_idx + self.minibatch_size
self.last_in_epoch = False
else:
end_idx = np.min( (self.next_minibatch_start_idx + self.minibatch_size, self.total_size) )
self.last_in_epoch = True
Y_out = self.Y[st_idx:end_idx]
X_out = self.X[st_idx:end_idx]
rand_inds = random.sample(range(len(Y_out)),len(Y_out))
self.previous_indexes_out = rand_inds[:] # copying
Y_out = [ Y_out[i] for i in rand_inds ]
X_out = [ X_out[i] for i in rand_inds ]
minibatch_index_out = self.minibatch_index
if self.last_in_epoch:
self.next_minibatch_start_idx = 0
self.minibatch_index = 0
return minibatch_index_out, rand_inds, Y_out, X_out
    def get_cur_index(self):
        """Return the current minibatch index and the permuted indices it used."""
        return self.minibatch_index, self.previous_indexes_out

    def minibatch_size(self):
        """Return the minibatch size (shadowed by the instance attribute of the same name)."""
        return self.minibatch_size

    def total_size(self):
        """Return the total number of sequences (shadowed by the instance attribute of the same name)."""
        return self.total_size

    def minibatch_last_in_epoch(self):
        """Return True if the most recent minibatch was the last one of its epoch."""
        return self.last_in_epoch
class StdMemoryDataStreamer(DataStreamerTemplate):
"""
    This is a standard data_streamer for data that fits into memory.
    Data is assumed to be in lists.
"""
    def __init__(self, Y, X, minibatch_size):
        """Store the dataset and the fixed minibatch size."""
if Y is not None and isinstance(Y, np.ndarray):
Y = [Y,]
warnings.warn("Input has only one sequence. No permutation functionality will be used.", RuntimeWarning)
if X is not None and isinstance(X, np.ndarray):
X = [X,]
assert len(Y) == len(X), "Input and output size must match"
assert minibatch_size <= len(Y), "Minibatch size must be less than the data size."
self.iterations_started = False
        self.minibatch_size = minibatch_size
self.next_minibatch_start_idx = 0
self.minibatch_index = 0
self.last_in_epoch = False
self.previous_indexes_out = None
self.Y = Y
self.X = X
self.total_size = len(Y)
    def next_minibatch(self):
        """Return the next minibatch and advance the position within the epoch."""
self.iterations_started = True
self.minibatch_index += 1
st_idx = self.next_minibatch_start_idx
if (self.next_minibatch_start_idx + self.minibatch_size) < self.total_size:
end_idx = self.next_minibatch_start_idx + self.minibatch_size
self.last_in_epoch = False
else:
end_idx = np.min( (self.next_minibatch_start_idx + self.minibatch_size, self.total_size) )
self.last_in_epoch = True
Y_out = self.Y[st_idx:end_idx]
X_out = self.X[st_idx:end_idx]
indexes_out = range(st_idx,end_idx)
self.previous_indexes_out = indexes_out[:] # copying
minibatch_index_out = self.minibatch_index
if self.last_in_epoch:
self.next_minibatch_start_idx = 0
self.minibatch_index = 0
else:
self.next_minibatch_start_idx += self.minibatch_size
return minibatch_index_out, indexes_out, Y_out, X_out
    def get_cur_index(self):
        """Return the current minibatch index and the indices it covered."""
        return self.minibatch_index, self.previous_indexes_out

    def minibatch_size(self):
        """Return the minibatch size (shadowed by the instance attribute of the same name)."""
        return self.minibatch_size

    def total_size(self):
        """Return the total number of sequences (shadowed by the instance attribute of the same name)."""
        return self.total_size

    def minibatch_last_in_epoch(self):
        """Return True if the most recent minibatch was the last one of its epoch."""
        return self.last_in_epoch
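# Minimal usage sketch (not part of the original module; the toy shapes and
# sizes below are assumptions for illustration): stream a small in-memory
# dataset for two epochs with StdMemoryDataStreamer.
if __name__ == '__main__':
    Y_data = [np.random.randn(10, 3) for _ in range(7)]  # 7 output sequences
    X_data = [np.random.randn(10, 2) for _ in range(7)]  # 7 input sequences
    streamer = StdMemoryDataStreamer(Y_data, X_data, minibatch_size=3)
    for _epoch in range(2):  # two passes over the data
        while True:
            mb_index, indices, Y_mb, X_mb = streamer.next_minibatch()
            print((mb_index, list(indices), len(Y_mb)))
            if streamer.last_in_epoch:
                break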
| 26.668966
| 116
| 0.550944
| 903
| 7,734
| 4.435216
| 0.122924
| 0.10387
| 0.080649
| 0.10437
| 0.822222
| 0.806492
| 0.752559
| 0.732834
| 0.722347
| 0.722347
| 0
| 0.003436
| 0.360357
| 7,734
| 290
| 117
| 26.668966
| 0.806145
| 0.071244
| 0
| 0.824324
| 0
| 0
| 0.040784
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 1
| 0.148649
| false
| 0
| 0.027027
| 0
| 0.331081
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
582c7eef5a382dc6fb95b190b0f8a872b7f97db7
| 8,895
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_downpoursgd.py
|
Sand3r-/Paddle
|
1217a521554d63caa1381b8716910d0268dfc22d
|
[
"Apache-2.0"
] | 1
|
2020-02-26T13:44:57.000Z
|
2020-02-26T13:44:57.000Z
|
python/paddle/fluid/tests/unittests/test_downpoursgd.py
|
Sand3r-/Paddle
|
1217a521554d63caa1381b8716910d0268dfc22d
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/test_downpoursgd.py
|
Sand3r-/Paddle
|
1217a521554d63caa1381b8716910d0268dfc22d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testcases for Downpour."""
from __future__ import print_function
import paddle
import paddle.fluid as fluid
import os
import signal
import subprocess
import time
import unittest
import sys
from op_test import OpTest
from paddle.fluid.trainer_desc import DistMultiTrainer
from paddle.fluid.device_worker import DownpourSGD, DownpourSGDOPT
from paddle.fluid.incubate.fleet.parameter_server.pslib.node import DownpourWorker
from google.protobuf import text_format
import paddle.fluid.incubate.fleet.parameter_server.pslib.ps_pb2 as pslib
from paddle.fluid.trainer_factory import TrainerFactory
class TestListenAndServOp(unittest.TestCase):
"""TestListenAndServOp."""
def setUp(self):
pass
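    # Hedged helper sketch (hypothetical, not used by the original tests):
    # the three tests below build near-identical opt_info dicts, differing
    # only in the device_worker name and the use_cvm flag, which a shared
    # builder like this could express.
    def _build_opt_info_sketch(self, device_worker, use_cvm):
        """Assemble the common fleet opt_info switches for a Downpour test."""
        return {
            "trainer": "DistMultiTrainer",
            "device_worker": device_worker,  # "DownpourSGD" or "DownpourSGDOPT"
            "optimizer": "DownpourSGD",
            "use_cvm": use_cvm,
            "scale_datanorm": -1,
            "dump_slot": False,
            "stat_var_names": [],
        }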
def test_device_work_use_cvm(self):
"""test device work use_cvm."""
if sys.platform == 'win32' or sys.platform == 'sys.platform':
pass
else:
print(sys.platform)
cmd = "wget --no-check-certificate https://pslib.bj.bcebos.com/fleet_desc.prototxt"
os.system(cmd)
x = fluid.layers.data(name='x', shape=[1], dtype='int64')
x_emb = fluid.layers.embedding(
input=x, size=[1, 2], is_distributed=True)
y_predict = fluid.layers.fc(input=x_emb, size=1, act=None)
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = fluid.layers.mean(cost)
ps_param = pslib.PSParameter()
with open("fleet_desc.prototxt") as f:
text_format.Merge(f.read(), ps_param)
fleet_desc = ps_param
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
opt_info = {}
main_program = fluid.default_main_program()
program_id = str(id(avg_cost.block.program))
program_configs = {}
program_configs[program_id] = {
"pull_sparse": [0],
"push_sparse": [0]
}
program_configs[program_id]["pull_dense"] = [1]
program_configs[program_id]["push_dense"] = [1]
worker_skipped_ops = ["lookup_table", "lookup_table_grad"]
opt_info["program_configs"] = program_configs
opt_info["trainer"] = "DistMultiTrainer"
opt_info["device_worker"] = "DownpourSGD"
opt_info["optimizer"] = "DownpourSGD"
opt_info["fleet_desc"] = ps_param
opt_info["worker_skipped_ops"] = worker_skipped_ops
opt_info["use_cvm"] = True
opt_info["scale_datanorm"] = -1
opt_info["dump_slot"] = False
opt_info["stat_var_names"] = []
worker = DownpourWorker(None)
worker.get_desc().CopyFrom(ps_param.trainer_param[0])
opt_info["program_id_to_worker"] = {program_id: worker}
main_program._fleet_opt = opt_info
trainer = TrainerFactory()._create_trainer(main_program._fleet_opt)
trainer._set_program(main_program)
trainer._gen_trainer_desc()
cmd = "rm fleet_desc.prototxt*"
os.system(cmd)
def test_device_work(self):
"""test devicve worker."""
if sys.platform == 'win32' or sys.platform == 'sys.platform':
pass
else:
print(sys.platform)
cmd = "wget --no-check-certificate https://pslib.bj.bcebos.com/fleet_desc.prototxt"
os.system(cmd)
x = fluid.layers.data(name='x', shape=[1], dtype='int64')
x_emb = fluid.layers.embedding(
input=x, size=[1, 2], is_distributed=True)
y_predict = fluid.layers.fc(input=x_emb, size=1, act=None)
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = fluid.layers.mean(cost)
ps_param = pslib.PSParameter()
with open("fleet_desc.prototxt") as f:
text_format.Merge(f.read(), ps_param)
fleet_desc = ps_param
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
opt_info = {}
main_program = fluid.default_main_program()
program_id = str(id(avg_cost.block.program))
program_configs = {}
program_configs[program_id] = {
"pull_sparse": [0],
"push_sparse": [0]
}
program_configs[program_id]["pull_dense"] = [1]
program_configs[program_id]["push_dense"] = [1]
worker_skipped_ops = ["lookup_table", "lookup_table_grad"]
opt_info["program_configs"] = program_configs
opt_info["trainer"] = "DistMultiTrainer"
opt_info["device_worker"] = "DownpourSGD"
opt_info["optimizer"] = "DownpourSGD"
opt_info["fleet_desc"] = ps_param
opt_info["worker_skipped_ops"] = worker_skipped_ops
opt_info["use_cvm"] = False
opt_info["scale_datanorm"] = -1
opt_info["dump_slot"] = False
opt_info["stat_var_names"] = []
worker = DownpourWorker(None)
worker.get_desc().CopyFrom(ps_param.trainer_param[0])
opt_info["program_id_to_worker"] = {program_id: worker}
main_program._fleet_opt = opt_info
trainer = TrainerFactory()._create_trainer(main_program._fleet_opt)
trainer._set_program(main_program)
trainer._gen_trainer_desc()
cmd = "rm fleet_desc.prototxt*"
os.system(cmd)
def test_downpour_opt_work(self):
"""test devicve worker."""
if sys.platform == 'win32' or sys.platform == 'sys.platform':
pass
else:
print(sys.platform)
cmd = "wget --no-check-certificate https://pslib.bj.bcebos.com/fleet_desc.prototxt"
os.system(cmd)
x = fluid.layers.data(name='x', shape=[1], dtype='int64')
x_emb = fluid.layers.embedding(
input=x, size=[1, 2], is_distributed=True)
y_predict = fluid.layers.fc(input=x_emb, size=1, act=None)
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = fluid.layers.mean(cost)
ps_param = pslib.PSParameter()
with open("fleet_desc.prototxt") as f:
text_format.Merge(f.read(), ps_param)
fleet_desc = ps_param
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
opt_info = {}
main_program = fluid.default_main_program()
program_id = str(id(avg_cost.block.program))
program_configs = {}
program_configs[program_id] = {
"pull_sparse": [0],
"push_sparse": [0]
}
program_configs[program_id]["pull_dense"] = [1]
program_configs[program_id]["push_dense"] = [1]
worker_skipped_ops = ["lookup_table", "lookup_table_grad"]
opt_info["program_configs"] = program_configs
opt_info["trainer"] = "DistMultiTrainer"
opt_info["device_worker"] = "DownpourSGDOPT"
opt_info["optimizer"] = "DownpourSGD"
opt_info["fleet_desc"] = ps_param
opt_info["worker_skipped_ops"] = worker_skipped_ops
opt_info["use_cvm"] = False
opt_info["scale_datanorm"] = -1
opt_info["dump_slot"] = False
opt_info["stat_var_names"] = []
worker = DownpourWorker(None)
worker.get_desc().CopyFrom(ps_param.trainer_param[0])
opt_info["program_id_to_worker"] = {program_id: worker}
main_program._fleet_opt = opt_info
trainer = TrainerFactory()._create_trainer(main_program._fleet_opt)
trainer._set_program(main_program)
trainer._gen_trainer_desc()
cmd = "rm fleet_desc.prototxt*"
os.system(cmd)
if __name__ == "__main__":
unittest.main()
| 41.957547
| 95
| 0.608319
| 1,060
| 8,895
| 4.829245
| 0.189623
| 0.053331
| 0.061535
| 0.040438
| 0.801524
| 0.79371
| 0.79371
| 0.776519
| 0.776519
| 0.776519
| 0
| 0.009355
| 0.278921
| 8,895
| 211
| 96
| 42.156398
| 0.788743
| 0.078583
| 0
| 0.836257
| 0
| 0
| 0.145869
| 0.00809
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023392
| false
| 0.023392
| 0.093567
| 0
| 0.122807
| 0.023392
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
58354576f02966e07603294ad8f7a7a01c21bf4c
| 2,954
|
py
|
Python
|
project-euler-solutions/p8/euler8.py
|
stravajiaxen/project-euler-solutions
|
1dcb8f843537bec58af7dafe4a24856cbbbef340
|
[
"MIT"
] | null | null | null |
project-euler-solutions/p8/euler8.py
|
stravajiaxen/project-euler-solutions
|
1dcb8f843537bec58af7dafe4a24856cbbbef340
|
[
"MIT"
] | null | null | null |
project-euler-solutions/p8/euler8.py
|
stravajiaxen/project-euler-solutions
|
1dcb8f843537bec58af7dafe4a24856cbbbef340
|
[
"MIT"
] | null | null | null |
"""
Copyright Matt DeMartino (Stravajiaxen)
Licensed under MIT License -- do whatever you want with this, just don't sue me!
This code attempts to solve Project Euler (projecteuler.net)
Problem #8 Largest product in a series
The four adjacent digits in the 1000-digit number that have the
greatest product are 9 * 9 * 8 * 9 = 5832.
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
Find the thirteen adjacent digits in the 1000-digit number
that have the greatest product. What is the value of this product?
"""
def product(*args):
tot = 1
for a in args:
tot *= a
return tot
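# A compact alternative sketch (assumes Python 3.8+ for math.prod; not part
# of the original solution): slide a width-13 window over the digit string
# and take the maximum product directly.
def max_window_product(digits, width=13):
    """Greatest product of `width` adjacent digits in the string `digits`."""
    from math import prod
    return max(prod(int(d) for d in digits[i:i + width])
               for i in range(len(digits) - width + 1))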
def main():
giant_num = \
"""
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
"""
giant_num = "".join([x.strip() for x in giant_num]) # Join it all up
prods = []
    for last_digit in range(13, len(giant_num) + 1):  # include the final 13-digit window
current_num = giant_num[last_digit-13:last_digit]
prods.append(product(*[int(i) for i in current_num]))
print(max(prods))
if __name__ == "__main__":
main()
| 37.392405
| 80
| 0.881855
| 182
| 2,954
| 14.21978
| 0.549451
| 0.012365
| 0.012365
| 0.014683
| 0.819165
| 0.819165
| 0.819165
| 0.819165
| 0.819165
| 0.819165
| 0
| 0.751763
| 0.087678
| 2,954
| 78
| 81
| 37.871795
| 0.208534
| 0.505755
| 0
| 0
| 0
| 0
| 0.019002
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0
| 0
| 0.2
| 0.066667
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
58460ca6e58278e4a7cea2be21625148ca808024
| 9,818
|
py
|
Python
|
generateData.py
|
jorgemauricio/validacionclimmapcore
|
544be21b81d02982321c18ae172b252e32039ec4
|
[
"MIT"
] | null | null | null |
generateData.py
|
jorgemauricio/validacionclimmapcore
|
544be21b81d02982321c18ae172b252e32039ec4
|
[
"MIT"
] | null | null | null |
generateData.py
|
jorgemauricio/validacionclimmapcore
|
544be21b81d02982321c18ae172b252e32039ec4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 17 16:17:25 2017
@author: jorgemauricio
"""
#%% libraries
import pandas as pd
import os
import math
import numpy as np
#%% Clear terminal
os.system('clear')
#%% Get all the .csv files in the data folder
filesList = [x for x in os.listdir('data') if x.endswith('.csv')]
#%% generate info for AGS
#%% -101.62 > Long > -103.09
#%% 22.58 > Lat > 21.54
for i in filesList:
print('***** Processing Ags: {}'.format(i))
fileTitle = 'data/{}'.format(i)
date, ext = i.split(".")
tYear, tMonth, tDay = date.split("-")
temporalData = pd.read_csv(fileTitle)
temporalData = temporalData.filter(['Long', 'Lat', 'Rain', 'Hr', 'Tpro'], axis=1)
temporalData = temporalData.loc[temporalData['Long'] > -103.09]
temporalData = temporalData.loc[temporalData['Long'] < -101.62]
temporalData = temporalData.loc[temporalData['Lat'] > 21.54]
temporalData = temporalData.loc[temporalData['Lat'] < 22.58]
temporalData['Year'] = int(tYear)
temporalData['Month'] = int(tMonth)
temporalData['Day'] = int(tDay)
processingFileTitle = 'ags/{}'.format(i)
temporalData.to_csv(processingFileTitle, index=False)
#%% generate info for Sonora
#%% -107.57 > Long > -115.61
#%% 33.02 > Lat > 25.70
for i in filesList:
print('***** Processing Sonora: {}'.format(i))
fileTitle = 'data/{}'.format(i)
date, ext = i.split(".")
tYear, tMonth, tDay = date.split("-")
temporalData = pd.read_csv(fileTitle)
temporalData = temporalData.filter(['Long', 'Lat', 'Rain', 'Hr', 'Tpro'], axis=1)
temporalData = temporalData.loc[temporalData['Long'] > -115.61]
temporalData = temporalData.loc[temporalData['Long'] < -107.57]
temporalData = temporalData.loc[temporalData['Lat'] > 25.70]
temporalData = temporalData.loc[temporalData['Lat'] < 33.02]
temporalData['Year'] = int(tYear)
temporalData['Month'] = int(tMonth)
temporalData['Day'] = int(tDay)
processingFileTitle = 'sonora/{}'.format(i)
temporalData.to_csv(processingFileTitle, index=False)
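#%% Hedged aside (illustrative only, not part of the pipeline): the four
#%% chained .loc filters above can be written as one boolean mask, e.g. for
#%% the Aguascalientes bounding box:
# box = ((temporalData['Long'] > -103.09) & (temporalData['Long'] < -101.62) &
#        (temporalData['Lat'] > 21.54) & (temporalData['Lat'] < 22.58))
# temporalData = temporalData[box]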
#%% Processing Ags
print('***** Processing Weather Stations from Aguascalientes \n')
#%% Read data
agsWeatherStations = pd.read_csv('dataStations/aguascalientes_2017.csv')
#%% Drop NA values from the rows
agsWeatherStations = agsWeatherStations.dropna()
#%% Final database structure
dataBaseStructure = "Station,Type,State,Lat,Long,Year,Month,Day,Rain,Hr,Tpro" + "\n"
dataBaseStructuretTest = "Station,State,Lat,Long,Year,Month,Day,Rain,Hr,Tpro,RainWRF,HrWRF,TproWRF" + "\n"
#%% iterate ags
for index, row in agsWeatherStations.iterrows():
# generate title for csv file from the WRF
monthTitle = "{}".format(int(row['Month']))
dayTitle = "{}".format(int(row['Day']))
if len(monthTitle) == 1:
monthTitle = "0" + monthTitle
if len(dayTitle) == 1:
dayTitle = "0" + dayTitle
print('***** Processing file of Ags: {}-{}-{}'.format(int(row['Year']),monthTitle,dayTitle))
temporalFileTitle = 'ags/{}-{}-{}.csv'.format(int(row['Year']),monthTitle,dayTitle)
dataWRF = pd.read_csv(temporalFileTitle)
#%% generate np arrays
Lat = np.array(dataWRF['Lat'])
Long = np.array(dataWRF['Long'])
Year = np.array(dataWRF['Year'])
Month = np.array(dataWRF['Month'])
Day = np.array(dataWRF['Day'])
Rain = np.array(dataWRF['Rain'])
Hr = np.array(dataWRF['Hr'])
Tpro = np.array(dataWRF['Tpro'])
# Point to evaluate
pointLat = row['Lat']
pointLong = row['Long']
pointNumber = row['Number']
pointYear = row['Year']
pointMonth = row['Month']
pointDay = row['Day']
pointRain = row['Rain']
pointHr = row['Hr']
pointTpro = row['Tpro']
    # distances and indices of the three nearest grid points (set on i == 0)
    d1 = 0.0
    d2 = 0.0
    d3 = 0.0
    pointIndex1 = 0
    pointIndex2 = 0
    pointIndex3 = 0
# Select the 3 points to interpolate
for i in range(len(Lat)):
distanceBetweenPoints = 0.0
differenceX = pointLong - Long[i]
differenceY = pointLat - Lat[i]
sumDifferenceXY = pow(differenceX, 2.0) + pow(differenceY, 2.0)
distanceBetweenPoints = math.sqrt(sumDifferenceXY)
if i == 0:
d1 = distanceBetweenPoints
pointIndex1 = i
d2 = distanceBetweenPoints
pointIndex2 = i
d3 = distanceBetweenPoints
pointIndex3 = i
if distanceBetweenPoints < d1:
d3 = d2
pointIndex3 = pointIndex2
d2 = d1
pointIndex2 = pointIndex1
d1 = distanceBetweenPoints
pointIndex1 = i
if distanceBetweenPoints > d1 and distanceBetweenPoints < d2:
d3 = d2
pointIndex3 = pointIndex2
d2 = distanceBetweenPoints
pointIndex2 = i
if distanceBetweenPoints > d2 and distanceBetweenPoints < d3:
d3 = distanceBetweenPoints
pointIndex3 = i
# Interpolate data
k = 2.0
w1 = 0.0
w2 = 0.0
w3 = 0.0
zTpro = 0.0
zRain = 0.0
zHr = 0.0
inverseSum = pow((1 / d1),k) + pow((1 / d2),k) + pow((1 / d3),k)
w1 = 1 / pow(d1,k) / inverseSum
w2 = 1 / pow(d2,k) / inverseSum
w3 = 1 / pow(d3,k) / inverseSum
zTpro = (w1 * Tpro[pointIndex1]) + (w2 * Tpro[pointIndex2]) + (w3 * Tpro[pointIndex3])
zRain = (w1 * Rain[pointIndex1]) + (w2 * Rain[pointIndex2]) + (w3 * Rain[pointIndex3])
zHr = (w1 * Hr[pointIndex1]) + (w2 * Hr[pointIndex2]) + (w3 * Hr[pointIndex3])
# structure 1
    dataBaseStructure += '{},{},{},{},{},{},{},{},{},{},{}\n'.format(pointNumber, 'Station', 'AGS', pointLat, pointLong, pointYear, pointMonth, pointDay, pointRain, pointHr, pointTpro)
    dataBaseStructure += '{},{},{},{},{},{},{},{},{},{},{}\n'.format(pointNumber, 'WRF', 'AGS', pointLat, pointLong, pointYear, pointMonth, pointDay, zRain, zHr, zTpro)
    # structure 2
    dataBaseStructuretTest += '{},{},{},{},{},{},{},{},{},{},{},{},{}\n'.format(pointNumber, 'AGS', pointLat, pointLong, pointYear, pointMonth, pointDay, pointRain, pointHr, pointTpro, zRain, zHr, zTpro)
#%% Save to csv data 1
fileName = 'dataFromAguascalientes.csv'
textFile = open(fileName, "w")
textFile.write(dataBaseStructure)
textFile.close()
#%% Save to csv data 2
fileName = 'dataFromAguascalientestTest.csv'
textFile = open(fileName, "w")
textFile.write(dataBaseStructuretTest)
textFile.close()
#%% Processing Sonora
print('***** Processing Weather Stations from Sonora \n')
#%% Read data
sonoraWeatherStations = pd.read_csv('dataStations/sonora_2017.csv')
#%% Drop NA values from the rows
sonoraWeatherStations = sonoraWeatherStations.dropna()
#%% Final database structure
dataBaseStructure = "Station,Type,State,Lat,Long,Year,Month,Day,Rain,Hr,Tpro" + "\n"
dataBaseStructuretTest = "Station,State,Lat,Long,Year,Month,Day,Rain,Hr,Tpro,RainWRF,HrWRF,TproWRF" + "\n"
#%% iterate Sonora
for index, row in sonoraWeatherStations.iterrows():
# generate title for csv file from the WRF
monthTitle = "{}".format(int(row['Month']))
dayTitle = "{}".format(int(row['Day']))
if len(monthTitle) == 1:
monthTitle = "0" + monthTitle
if len(dayTitle) == 1:
dayTitle = "0" + dayTitle
print('***** Processing file of Sonora: {}-{}-{}'.format(int(row['Year']),monthTitle,dayTitle))
temporalFileTitle = 'sonora/{}-{}-{}.csv'.format(int(row['Year']),monthTitle,dayTitle)
dataWRF = pd.read_csv(temporalFileTitle)
#%% generate np arrays
Lat = np.array(dataWRF['Lat'])
Long = np.array(dataWRF['Long'])
Year = np.array(dataWRF['Year'])
Month = np.array(dataWRF['Month'])
Day = np.array(dataWRF['Day'])
Rain = np.array(dataWRF['Rain'])
Hr = np.array(dataWRF['Hr'])
Tpro = np.array(dataWRF['Tpro'])
# Point to evaluate
pointLat = row['Lat']
pointLong = row['Long']
pointNumber = row['Number']
pointYear = row['Year']
pointMonth = row['Month']
pointDay = row['Day']
pointRain = row['Rain']
pointHr = row['Hr']
pointTpro = row['Tpro']
    # distances and indices of the three nearest grid points (set on i == 0)
    d1 = 0.0
    d2 = 0.0
    d3 = 0.0
    pointIndex1 = 0
    pointIndex2 = 0
    pointIndex3 = 0
# Select the 3 points to interpolate
for i in range(len(Lat)):
distanceBetweenPoints = 0.0
differenceX = pointLong - Long[i]
differenceY = pointLat - Lat[i]
sumDifferenceXY = pow(differenceX, 2.0) + pow(differenceY, 2.0)
distanceBetweenPoints = math.sqrt(sumDifferenceXY)
if i == 0:
d1 = distanceBetweenPoints
pointIndex1 = i
d2 = distanceBetweenPoints
pointIndex2 = i
d3 = distanceBetweenPoints
pointIndex3 = i
if distanceBetweenPoints < d1:
d3 = d2
pointIndex3 = pointIndex2
d2 = d1
pointIndex2 = pointIndex1
d1 = distanceBetweenPoints
pointIndex1 = i
if distanceBetweenPoints > d1 and distanceBetweenPoints < d2:
d3 = d2
pointIndex3 = pointIndex2
d2 = distanceBetweenPoints
pointIndex2 = i
if distanceBetweenPoints > d2 and distanceBetweenPoints < d3:
d3 = distanceBetweenPoints
pointIndex3 = i
# Interpolate data
k = 2.0
w1 = 0.0
w2 = 0.0
w3 = 0.0
zTpro = 0.0
zRain = 0.0
zHr = 0.0
inverseSum = pow((1 / d1),k) + pow((1 / d2),k) + pow((1 / d3),k)
w1 = 1 / pow(d1,k) / inverseSum
w2 = 1 / pow(d2,k) / inverseSum
w3 = 1 / pow(d3,k) / inverseSum
zTpro = (w1 * Tpro[pointIndex1]) + (w2 * Tpro[pointIndex2]) + (w3 * Tpro[pointIndex3])
zRain = (w1 * Rain[pointIndex1]) + (w2 * Rain[pointIndex2]) + (w3 * Rain[pointIndex3])
zHr = (w1 * Hr[pointIndex1]) + (w2 * Hr[pointIndex2]) + (w3 * Hr[pointIndex3])
# structure 1
    dataBaseStructure += '{},{},{},{},{},{},{},{},{},{},{}\n'.format(pointNumber, 'Station', 'SON', pointLat, pointLong, pointYear, pointMonth, pointDay, pointRain, pointHr, pointTpro)
    dataBaseStructure += '{},{},{},{},{},{},{},{},{},{},{}\n'.format(pointNumber, 'WRF', 'SON', pointLat, pointLong, pointYear, pointMonth, pointDay, zRain, zHr, zTpro)
    # structure 2
    dataBaseStructuretTest += '{},{},{},{},{},{},{},{},{},{},{},{},{}\n'.format(pointNumber, 'SON', pointLat, pointLong, pointYear, pointMonth, pointDay, pointRain, pointHr, pointTpro, zRain, zHr, zTpro)
#%% Save to csv data 1
fileName = 'dataFromSonora.csv'
textFile = open(fileName, "w")
textFile.write(dataBaseStructure)
textFile.close()
#%% Save to csv data 2
fileName = 'dataFromSonoratTest.csv'
textFile = open(fileName, "w")
textFile.write(dataBaseStructuretTest)
textFile.close()
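#%% Hedged sketch (illustrative; the script above keeps its inline version):
#%% the interpolation used in both loops, as a reusable function. It applies
#%% inverse-distance weighting with exponent k over the three nearest grid
#%% points, matching the selection logic above. longs, lats, values are
#%% assumed to be 1-D numpy arrays of equal length.
def idw3(point_long, point_lat, longs, lats, values, k=2.0):
    """Inverse-distance-weighted estimate at (point_long, point_lat) from the 3 nearest points."""
    dists = np.sqrt((longs - point_long) ** 2 + (lats - point_lat) ** 2)
    nearest = np.argsort(dists)[:3]
    d = dists[nearest]
    if d[0] == 0:  # exact grid hit: return that value directly
        return float(values[nearest[0]])
    w = (1.0 / d) ** k
    return float(np.sum(w * values[nearest]) / np.sum(w))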
| 32.296053
| 199
| 0.672337
| 1,202
| 9,818
| 5.483361
| 0.153078
| 0.00789
| 0.033986
| 0.047337
| 0.879988
| 0.831133
| 0.82112
| 0.805644
| 0.780458
| 0.778638
| 0
| 0.038577
| 0.149827
| 9,818
| 304
| 200
| 32.296053
| 0.751048
| 0.09615
| 0
| 0.818182
| 0
| 0.018182
| 0.142177
| 0.069559
| 0.018182
| 0
| 0
| 0.003289
| 0
| 1
| 0
| false
| 0
| 0.018182
| 0
| 0.018182
| 0.027273
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
584e6464a614b613ffcd14cd2342cd05fc6ba015
| 159
|
py
|
Python
|
uiautomationtools/selenium/__init__.py
|
asboxi/ui-automation-tools-mbt
|
552abde2c228fb9eab83126a645b4ab6276f1bb4
|
[
"MIT"
] | null | null | null |
uiautomationtools/selenium/__init__.py
|
asboxi/ui-automation-tools-mbt
|
552abde2c228fb9eab83126a645b4ab6276f1bb4
|
[
"MIT"
] | 4
|
2021-11-04T04:45:37.000Z
|
2021-11-12T06:24:10.000Z
|
uiautomationtools/selenium/__init__.py
|
asboxi/ui-automation-tools-mbt
|
552abde2c228fb9eab83126a645b4ab6276f1bb4
|
[
"MIT"
] | 4
|
2021-10-18T05:46:38.000Z
|
2021-11-26T05:25:39.000Z
|
from uiautomationtools.selenium.appium.appium_factory import appium_factory
from uiautomationtools.selenium.selenium.selenium_extended import SeleniumExtended
| 53
| 82
| 0.91195
| 17
| 159
| 8.352941
| 0.470588
| 0.295775
| 0.408451
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050314
| 159
| 2
| 83
| 79.5
| 0.940397
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
584ff690dda1577ceb5f144a666b47aac6b3e874
| 41,647
|
py
|
Python
|
quay/api/logs_api.py
|
angeiv/python-quay
|
16072f87956d8f581ac9ebccc67f6563e977cf52
|
[
"MIT"
] | null | null | null |
quay/api/logs_api.py
|
angeiv/python-quay
|
16072f87956d8f581ac9ebccc67f6563e977cf52
|
[
"MIT"
] | null | null | null |
quay/api/logs_api.py
|
angeiv/python-quay
|
16072f87956d8f581ac9ebccc67f6563e977cf52
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Quay Frontend
This API allows you to perform many of the operations required to work with Quay repositories, users, and organizations. You can find out more at <a href=\"https://quay.io\">Quay</a>. # noqa: E501
OpenAPI spec version: v1
Contact: support@quay.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from quay.api_client import ApiClient
class LogsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def export_org_logs(self, body, orgname, **kwargs): # noqa: E501
"""export_org_logs # noqa: E501
Exports the logs for the specified organization. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.export_org_logs(body, orgname, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ExportLogs body: Request body contents. (required)
:param str orgname: The name of the organization (required)
:param str endtime: Latest time for logs. Format: \"%m/%d/%Y\" in UTC.
:param str starttime: Earliest time for logs. Format: \"%m/%d/%Y\" in UTC.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.export_org_logs_with_http_info(body, orgname, **kwargs) # noqa: E501
else:
(data) = self.export_org_logs_with_http_info(body, orgname, **kwargs) # noqa: E501
return data
def export_org_logs_with_http_info(self, body, orgname, **kwargs): # noqa: E501
"""export_org_logs # noqa: E501
Exports the logs for the specified organization. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.export_org_logs_with_http_info(body, orgname, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ExportLogs body: Request body contents. (required)
:param str orgname: The name of the organization (required)
:param str endtime: Latest time for logs. Format: \"%m/%d/%Y\" in UTC.
:param str starttime: Earliest time for logs. Format: \"%m/%d/%Y\" in UTC.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'orgname', 'endtime', 'starttime'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method export_org_logs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `export_org_logs`") # noqa: E501
# verify the required parameter 'orgname' is set
if ('orgname' not in params or
params['orgname'] is None):
raise ValueError("Missing the required parameter `orgname` when calling `export_org_logs`") # noqa: E501
collection_formats = {}
path_params = {}
if 'orgname' in params:
path_params['orgname'] = params['orgname'] # noqa: E501
query_params = []
if 'endtime' in params:
query_params.append(('endtime', params['endtime'])) # noqa: E501
if 'starttime' in params:
query_params.append(('starttime', params['starttime'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_implicit'] # noqa: E501
return self.api_client.call_api(
'/api/v1/organization/{orgname}/exportlogs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
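    # Usage sketch (illustrative, not generated code): queueing an org log
    # export mirrors the doctest examples above, e.g.
    #
    # >>> from quay.api_client import ApiClient
    # >>> from quay.models import ExportLogs   # assumed import path
    # >>> api = LogsApi(ApiClient())
    # >>> api.export_org_logs(ExportLogs(), 'myorg',
    # ...                     starttime='01/01/2021', endtime='01/31/2021')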
def export_repo_logs(self, body, repository, **kwargs): # noqa: E501
"""export_repo_logs # noqa: E501
Queues an export of the logs for the specified repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.export_repo_logs(body, repository, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ExportLogs body: Request body contents. (required)
:param str repository: The full path of the repository. e.g. namespace/name (required)
:param str endtime: Latest time for logs. Format: \"%m/%d/%Y\" in UTC.
:param str starttime: Earliest time for logs. Format: \"%m/%d/%Y\" in UTC.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.export_repo_logs_with_http_info(body, repository, **kwargs) # noqa: E501
else:
(data) = self.export_repo_logs_with_http_info(body, repository, **kwargs) # noqa: E501
return data
def export_repo_logs_with_http_info(self, body, repository, **kwargs): # noqa: E501
"""export_repo_logs # noqa: E501
Queues an export of the logs for the specified repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.export_repo_logs_with_http_info(body, repository, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ExportLogs body: Request body contents. (required)
:param str repository: The full path of the repository. e.g. namespace/name (required)
:param str endtime: Latest time for logs. Format: \"%m/%d/%Y\" in UTC.
:param str starttime: Earliest time for logs. Format: \"%m/%d/%Y\" in UTC.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'repository', 'endtime', 'starttime'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method export_repo_logs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `export_repo_logs`") # noqa: E501
# verify the required parameter 'repository' is set
if ('repository' not in params or
params['repository'] is None):
raise ValueError("Missing the required parameter `repository` when calling `export_repo_logs`") # noqa: E501
collection_formats = {}
path_params = {}
if 'repository' in params:
path_params['repository'] = params['repository'] # noqa: E501
query_params = []
if 'endtime' in params:
query_params.append(('endtime', params['endtime'])) # noqa: E501
if 'starttime' in params:
query_params.append(('starttime', params['starttime'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_implicit'] # noqa: E501
return self.api_client.call_api(
'/api/v1/repository/{repository}/exportlogs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def export_user_logs(self, body, **kwargs): # noqa: E501
"""export_user_logs # noqa: E501
        Exports the logs for the current user.  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.export_user_logs(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ExportLogs body: Request body contents. (required)
:param str endtime: Latest time for logs. Format: \"%m/%d/%Y\" in UTC.
:param str starttime: Earliest time for logs. Format: \"%m/%d/%Y\" in UTC.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.export_user_logs_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.export_user_logs_with_http_info(body, **kwargs) # noqa: E501
return data
def export_user_logs_with_http_info(self, body, **kwargs): # noqa: E501
"""export_user_logs # noqa: E501
        Exports the logs for the current user.  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.export_user_logs_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ExportLogs body: Request body contents. (required)
:param str endtime: Latest time for logs. Format: \"%m/%d/%Y\" in UTC.
:param str starttime: Earliest time for logs. Format: \"%m/%d/%Y\" in UTC.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'endtime', 'starttime'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method export_user_logs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `export_user_logs`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'endtime' in params:
query_params.append(('endtime', params['endtime'])) # noqa: E501
if 'starttime' in params:
query_params.append(('starttime', params['starttime'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_implicit'] # noqa: E501
return self.api_client.call_api(
'/api/v1/user/exportlogs', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_aggregate_org_logs(self, orgname, **kwargs): # noqa: E501
"""get_aggregate_org_logs # noqa: E501
Gets the aggregated logs for the specified organization. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_aggregate_org_logs(orgname, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str orgname: The name of the organization (required)
:param str performer: Username for which to filter logs.
:param str endtime: Latest time for logs. Format: \"%m/%d/%Y\" in UTC.
:param str starttime: Earliest time for logs. Format: \"%m/%d/%Y\" in UTC.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_aggregate_org_logs_with_http_info(orgname, **kwargs) # noqa: E501
else:
(data) = self.get_aggregate_org_logs_with_http_info(orgname, **kwargs) # noqa: E501
return data
def get_aggregate_org_logs_with_http_info(self, orgname, **kwargs): # noqa: E501
"""get_aggregate_org_logs # noqa: E501
Gets the aggregated logs for the specified organization. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_aggregate_org_logs_with_http_info(orgname, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str orgname: The name of the organization (required)
:param str performer: Username for which to filter logs.
:param str endtime: Latest time for logs. Format: \"%m/%d/%Y\" in UTC.
:param str starttime: Earliest time for logs. Format: \"%m/%d/%Y\" in UTC.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['orgname', 'performer', 'endtime', 'starttime'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_aggregate_org_logs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'orgname' is set
if ('orgname' not in params or
params['orgname'] is None):
raise ValueError("Missing the required parameter `orgname` when calling `get_aggregate_org_logs`") # noqa: E501
collection_formats = {}
path_params = {}
if 'orgname' in params:
path_params['orgname'] = params['orgname'] # noqa: E501
query_params = []
if 'performer' in params:
query_params.append(('performer', params['performer'])) # noqa: E501
if 'endtime' in params:
query_params.append(('endtime', params['endtime'])) # noqa: E501
if 'starttime' in params:
query_params.append(('starttime', params['starttime'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_implicit'] # noqa: E501
return self.api_client.call_api(
'/api/v1/organization/{orgname}/aggregatelogs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_aggregate_repo_logs(self, repository, **kwargs): # noqa: E501
"""get_aggregate_repo_logs # noqa: E501
Returns the aggregated logs for the specified repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_aggregate_repo_logs(repository, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str repository: The full path of the repository. e.g. namespace/name (required)
:param str endtime: Latest time for logs. Format: \"%m/%d/%Y\" in UTC.
:param str starttime: Earliest time for logs. Format: \"%m/%d/%Y\" in UTC.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_aggregate_repo_logs_with_http_info(repository, **kwargs) # noqa: E501
else:
(data) = self.get_aggregate_repo_logs_with_http_info(repository, **kwargs) # noqa: E501
return data
def get_aggregate_repo_logs_with_http_info(self, repository, **kwargs): # noqa: E501
"""get_aggregate_repo_logs # noqa: E501
Returns the aggregated logs for the specified repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_aggregate_repo_logs_with_http_info(repository, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str repository: The full path of the repository. e.g. namespace/name (required)
:param str endtime: Latest time for logs. Format: \"%m/%d/%Y\" in UTC.
:param str starttime: Earliest time for logs. Format: \"%m/%d/%Y\" in UTC.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['repository', 'endtime', 'starttime'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_aggregate_repo_logs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'repository' is set
if ('repository' not in params or
params['repository'] is None):
raise ValueError("Missing the required parameter `repository` when calling `get_aggregate_repo_logs`") # noqa: E501
collection_formats = {}
path_params = {}
if 'repository' in params:
path_params['repository'] = params['repository'] # noqa: E501
query_params = []
if 'endtime' in params:
query_params.append(('endtime', params['endtime'])) # noqa: E501
if 'starttime' in params:
query_params.append(('starttime', params['starttime'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_implicit'] # noqa: E501
return self.api_client.call_api(
'/api/v1/repository/{repository}/aggregatelogs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_aggregate_user_logs(self, **kwargs): # noqa: E501
"""get_aggregate_user_logs # noqa: E501
Returns the aggregated logs for the current user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_aggregate_user_logs(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str performer: Username for which to filter logs.
:param str endtime: Latest time for logs. Format: \"%m/%d/%Y\" in UTC.
:param str starttime: Earliest time for logs. Format: \"%m/%d/%Y\" in UTC.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_aggregate_user_logs_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_aggregate_user_logs_with_http_info(**kwargs) # noqa: E501
return data
def get_aggregate_user_logs_with_http_info(self, **kwargs): # noqa: E501
"""get_aggregate_user_logs # noqa: E501
Returns the aggregated logs for the current user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_aggregate_user_logs_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str performer: Username for which to filter logs.
:param str endtime: Latest time for logs. Format: \"%m/%d/%Y\" in UTC.
:param str starttime: Earliest time for logs. Format: \"%m/%d/%Y\" in UTC.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['performer', 'endtime', 'starttime'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_aggregate_user_logs" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'performer' in params:
query_params.append(('performer', params['performer'])) # noqa: E501
if 'endtime' in params:
query_params.append(('endtime', params['endtime'])) # noqa: E501
if 'starttime' in params:
query_params.append(('starttime', params['starttime'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_implicit'] # noqa: E501
return self.api_client.call_api(
'/api/v1/user/aggregatelogs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_org_logs(self, orgname, **kwargs): # noqa: E501
"""list_org_logs # noqa: E501
List the logs for the specified organization. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_org_logs(orgname, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str orgname: The name of the organization (required)
:param str next_page: The page token for the next page
:param str performer: Username for which to filter logs.
:param str endtime: Latest time for logs. Format: \"%m/%d/%Y\" in UTC.
:param str starttime: Earliest time for logs. Format: \"%m/%d/%Y\" in UTC.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_org_logs_with_http_info(orgname, **kwargs) # noqa: E501
else:
(data) = self.list_org_logs_with_http_info(orgname, **kwargs) # noqa: E501
return data
def list_org_logs_with_http_info(self, orgname, **kwargs): # noqa: E501
"""list_org_logs # noqa: E501
List the logs for the specified organization. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_org_logs_with_http_info(orgname, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str orgname: The name of the organization (required)
:param str next_page: The page token for the next page
:param str performer: Username for which to filter logs.
:param str endtime: Latest time for logs. Format: \"%m/%d/%Y\" in UTC.
:param str starttime: Earliest time for logs. Format: \"%m/%d/%Y\" in UTC.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['orgname', 'next_page', 'performer', 'endtime', 'starttime'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_org_logs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'orgname' is set
if ('orgname' not in params or
params['orgname'] is None):
raise ValueError("Missing the required parameter `orgname` when calling `list_org_logs`") # noqa: E501
collection_formats = {}
path_params = {}
if 'orgname' in params:
path_params['orgname'] = params['orgname'] # noqa: E501
query_params = []
if 'next_page' in params:
query_params.append(('next_page', params['next_page'])) # noqa: E501
if 'performer' in params:
query_params.append(('performer', params['performer'])) # noqa: E501
if 'endtime' in params:
query_params.append(('endtime', params['endtime'])) # noqa: E501
if 'starttime' in params:
query_params.append(('starttime', params['starttime'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_implicit'] # noqa: E501
return self.api_client.call_api(
'/api/v1/organization/{orgname}/logs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_repo_logs(self, repository, **kwargs): # noqa: E501
"""list_repo_logs # noqa: E501
List the logs for the specified repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_repo_logs(repository, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str repository: The full path of the repository. e.g. namespace/name (required)
:param str next_page: The page token for the next page
:param str endtime: Latest time for logs. Format: \"%m/%d/%Y\" in UTC.
:param str starttime: Earliest time for logs. Format: \"%m/%d/%Y\" in UTC.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_repo_logs_with_http_info(repository, **kwargs) # noqa: E501
else:
(data) = self.list_repo_logs_with_http_info(repository, **kwargs) # noqa: E501
return data
def list_repo_logs_with_http_info(self, repository, **kwargs): # noqa: E501
"""list_repo_logs # noqa: E501
List the logs for the specified repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_repo_logs_with_http_info(repository, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str repository: The full path of the repository. e.g. namespace/name (required)
:param str next_page: The page token for the next page
:param str endtime: Latest time for logs. Format: \"%m/%d/%Y\" in UTC.
:param str starttime: Earliest time for logs. Format: \"%m/%d/%Y\" in UTC.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['repository', 'next_page', 'endtime', 'starttime'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_repo_logs" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'repository' is set
if ('repository' not in params or
params['repository'] is None):
raise ValueError("Missing the required parameter `repository` when calling `list_repo_logs`") # noqa: E501
collection_formats = {}
path_params = {}
if 'repository' in params:
path_params['repository'] = params['repository'] # noqa: E501
query_params = []
if 'next_page' in params:
query_params.append(('next_page', params['next_page'])) # noqa: E501
if 'endtime' in params:
query_params.append(('endtime', params['endtime'])) # noqa: E501
if 'starttime' in params:
query_params.append(('starttime', params['starttime'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_implicit'] # noqa: E501
return self.api_client.call_api(
'/api/v1/repository/{repository}/logs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_user_logs(self, **kwargs): # noqa: E501
"""list_user_logs # noqa: E501
List the logs for the current user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_user_logs(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str next_page: The page token for the next page
:param str performer: Username for which to filter logs.
:param str endtime: Latest time for logs. Format: \"%m/%d/%Y\" in UTC.
:param str starttime: Earliest time for logs. Format: \"%m/%d/%Y\" in UTC.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_user_logs_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_user_logs_with_http_info(**kwargs) # noqa: E501
return data
def list_user_logs_with_http_info(self, **kwargs): # noqa: E501
"""list_user_logs # noqa: E501
List the logs for the current user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_user_logs_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str next_page: The page token for the next page
:param str performer: Username for which to filter logs.
:param str endtime: Latest time for logs. Format: \"%m/%d/%Y\" in UTC.
:param str starttime: Earliest time for logs. Format: \"%m/%d/%Y\" in UTC.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['next_page', 'performer', 'endtime', 'starttime'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_user_logs" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'next_page' in params:
query_params.append(('next_page', params['next_page'])) # noqa: E501
if 'performer' in params:
query_params.append(('performer', params['performer'])) # noqa: E501
if 'endtime' in params:
query_params.append(('endtime', params['endtime'])) # noqa: E501
if 'starttime' in params:
query_params.append(('starttime', params['starttime'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_implicit'] # noqa: E501
return self.api_client.call_api(
'/api/v1/user/logs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
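All of the generated log-retrieval methods above share one calling convention, so a single usage sketch covers them. The package and class names below (`swagger_client`, `Configuration`, `ApiClient`, `LogsApi`) follow the usual swagger-codegen layout but are assumptions; the actual package name is not visible in this fragment.
import swagger_client  # hypothetical package name for the generated client

config = swagger_client.Configuration()
config.host = "https://quay.example.com"   # hypothetical registry host
config.access_token = "TOKEN"              # bearer token for oauth2_implicit

api = swagger_client.LogsApi(swagger_client.ApiClient(config))

# Synchronous call; dates use the "%m/%d/%Y" UTC format documented above.
api.list_repo_logs("namespace/name", starttime="01/01/2021", endtime="01/31/2021")

# Asynchronous call: returns a thread-like object whose .get() blocks.
thread = api.list_user_logs(async_req=True)
result = thread.get()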
| 41.605395
| 201
| 0.605446
| 4,940
| 41,647
| 4.890486
| 0.040283
| 0.051327
| 0.026036
| 0.025332
| 0.975951
| 0.975454
| 0.971356
| 0.959228
| 0.956745
| 0.954924
| 0
| 0.016677
| 0.294499
| 41,647
| 1,000
| 202
| 41.647
| 0.805561
| 0.364348
| 0
| 0.831144
| 0
| 0
| 0.189747
| 0.041599
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035647
| false
| 0
| 0.007505
| 0
| 0.095685
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
586ba6bd429cc19ae95beef46e51bbc0874c023b
| 101
|
py
|
Python
|
pyintercept/handlers/pdb_handler.py
|
caioariede/pyintercept
|
19039ce3038521bf32aaafe207024adeb0096749
|
[
"MIT"
] | 32
|
2015-07-20T21:13:26.000Z
|
2018-04-05T13:53:28.000Z
|
pyintercept/handlers/pdb_handler.py
|
caioariede/pyintercept
|
19039ce3038521bf32aaafe207024adeb0096749
|
[
"MIT"
] | 2
|
2019-07-23T17:38:06.000Z
|
2020-02-27T13:38:02.000Z
|
pyintercept/handlers/pdb_handler.py
|
caioariede/pyintercept
|
19039ce3038521bf32aaafe207024adeb0096749
|
[
"MIT"
] | 3
|
2015-08-09T14:48:38.000Z
|
2020-02-27T12:58:46.000Z
|
def pdb(origfn, *args, **kwargs):
    import pdb; pdb.set_trace()
    return origfn(*args, **kwargs)
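For context, a handler with this signature is meant to be swapped in for a target callable so that it receives the original as `origfn`. The `intercept` helper below is hypothetical glue written for illustration; it is not pyintercept's actual API.
import functools

def intercept(target, handler):
    """Wrap `target` so that `handler(target, ...)` runs in its place."""
    @functools.wraps(target)
    def wrapper(*args, **kwargs):
        return handler(target, *args, **kwargs)
    return wrapper

def greet(name):
    return "hello %s" % name

# greet = intercept(greet, pdb)  # would drop into the debugger, then call greet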
| 25.25
| 34
| 0.643564
| 14
| 101
| 4.571429
| 0.642857
| 0.3125
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178218
| 101
| 3
| 35
| 33.666667
| 0.771084
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
58a03d3901160c60664e9d184af8b6a187758101
| 3,434
|
py
|
Python
|
test/obsolete/cmvspy.py
|
hollinsky-intrepid/python_ics
|
b6ec5486ec3cc2548e33845c265faccf293b88f5
|
[
"Unlicense"
] | 45
|
2017-10-17T08:42:08.000Z
|
2022-02-21T16:26:48.000Z
|
test/cmvspy.py
|
ic3man5/python_ics
|
e2dfbb60e14d6292a14c6f7685ca8dd4ce2f6916
|
[
"Unlicense"
] | 106
|
2017-03-07T21:10:39.000Z
|
2022-03-29T15:32:46.000Z
|
test/cmvspy.py
|
ic3man5/python_ics
|
e2dfbb60e14d6292a14c6f7685ca8dd4ce2f6916
|
[
"Unlicense"
] | 17
|
2017-04-04T12:30:22.000Z
|
2022-01-28T05:30:25.000Z
|
# This file was automatically generated by 'cmvspyconvert.py'
# from a cmvspy.h file.
# Example usage:
#     from cmvspy import data as coremini_data
data = ( \
0x07, 0x09, 0x13, 0x00, 0x00, 0x02, 0x2B, 0x20, 0x04, 0xD2, \
0xBA, 0xB4, 0x3F, 0x43, 0xFF, 0xF4, 0xBD, 0x2A, 0x75, 0xE4, \
0x47, 0xFA, 0xE1, 0xE4, 0xA8, 0x82, 0xCE, 0x87, 0x0E, 0xB3, \
0xEA, 0x7A, 0xD5, 0xF1, 0x6B, 0x2E, 0x1B, 0x8F, 0x0B, 0x82, \
0x17, 0xE8, 0x09, 0x4D, 0x88, 0x78, 0xED, 0xA4, 0xC1, 0x40, \
0x67, 0x73, 0xE3, 0xB0, 0xB6, 0xAD, 0xC6, 0x7C, 0x9D, 0x0C, \
0x64, 0x71, 0xF9, 0xEF, 0xD0, 0xFF, 0xD1, 0x62, 0x3A, 0x7D, \
0x6D, 0x31, 0x59, 0xA6, 0x9D, 0x2D, 0xB1, 0x2E, 0x3B, 0x84, \
0xD3, 0x53, 0x56, 0xC8, 0x18, 0x45, 0x81, 0xDB, 0x0A, 0x3D, \
0x6D, 0xAB, 0x06, 0x4F, 0x2D, 0x38, 0x0A, 0x3D, 0x6D, 0xAB, \
0x06, 0x4F, 0x2D, 0x38, 0x0A, 0x3D, 0x6D, 0xAB, 0x06, 0x4F, \
0x2D, 0x38, 0x0A, 0x3D, 0x6D, 0xAB, 0x06, 0x4F, 0x2D, 0x38, \
0x7B, 0xE6, 0x3C, 0x8F, 0x4F, 0x6D, 0xB6, 0xE5, 0x05, 0x6F, \
0x8C, 0xCE, 0xA4, 0x0A, 0xDC, 0x31, 0xDA, 0x52, 0x6F, 0xE6, \
0xE2, 0xC2, 0x3A, 0xF3, 0xA7, 0xF5, 0x30, 0x48, 0xD7, 0x91, \
0x22, 0x0E, 0x6E, 0x18, 0xF1, 0x05, 0xF1, 0xEB, 0xF9, 0xEF, \
0x93, 0x7F, 0x60, 0x67, 0x94, 0x13, 0xCF, 0x9D, 0x78, 0x8B, \
0xD5, 0x18, 0x06, 0xEE, 0xA4, 0xE5, 0x8D, 0xBA, 0x17, 0x22, \
0x0C, 0x72, 0x19, 0xD2, 0xE8, 0xC9, 0x11, 0x03, 0xEE, 0x0D, \
0x3B, 0x9A, 0xBD, 0x0C, 0x16, 0x28, 0x51, 0x47, 0x1A, 0x42, \
0x0D, 0xEA, 0xD3, 0x56, 0x72, 0x00, 0xD4, 0x55, 0xB8, 0x69, \
0x4C, 0xCD, 0xB9, 0x7F, 0xDB, 0x50, 0xE4, 0x67, 0xB5, 0xFC, \
0x7F, 0x10, 0x63, 0xF2, 0x33, 0xA1, 0xD8, 0x00, 0x4D, 0xEE, \
0xE8, 0x7A, 0xE0, 0xC4, 0x40, 0x0C, 0x90, 0x9A, 0xC8, 0x28, \
0x43, 0x86, 0x06, 0xA8, 0xC7, 0xE8, 0x38, 0xE7, 0xC4, 0x9B, \
0x7D, 0xC0, 0x50, 0x82, 0x27, 0x91, 0x67, 0xFF, 0x14, 0xDF, \
0x62, 0x49, 0x0C, 0x0F, 0x9E, 0x1A, 0xC2, 0xA7, 0xF5, 0x03, \
0x63, 0x15, 0x63, 0xB0, 0x10, 0x33, 0x14, 0xF9, 0xEC, 0x6A, \
0xAA, 0x7C, 0xB3, 0x91, 0x79, 0x5C, 0x09, 0x79, 0xDC, 0x6C, \
0xED, 0x97, 0x66, 0xB7, 0x49, 0xD7, 0xB5, 0x64, 0xAD, 0xCB, \
0x3F, 0x30, 0x8B, 0x69, 0x5F, 0xF1, 0xB5, 0x23, 0x2D, 0x14, \
0xE8, 0x9A, 0x2E, 0xC7, 0xFE, 0xEA, 0xBA, 0x3F, 0x4E, 0x27, \
0x9E, 0xDA, 0xC1, 0x57, 0xEE, 0x9B, 0x88, 0x77, 0x8D, 0xB0, \
0x8C, 0x55, 0x3F, 0x9C, 0xC3, 0x3F, 0xF9, 0x97, 0x59, 0xDA, \
0x15, 0xAA, 0xFB, 0xF6, 0xAF, 0xA4, 0x4B, 0x50, 0xC1, 0xB0, \
0x0A, 0x93, 0x1B, 0xF7, 0x1F, 0x44, 0xB7, 0xFD, 0x4E, 0x2B, \
0x54, 0xDD, 0xF2, 0x55, 0xB5, 0xC7, 0xA9, 0xF1, 0x30, 0x0B, \
0xAF, 0x29, 0xF8, 0xB3, 0xB0, 0x99, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, )
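The tuple holds the CoreMini script as plain integers; a consumer would typically repack it into bytes before handing it to a device API (a one-line sketch, not part of the generated file):
payload = bytes(data)  # tuple of ints in range 0-255 -> immutable bytes object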
| 58.20339
| 63
| 0.63046
| 535
| 3,434
| 4.04486
| 0.392523
| 0.502773
| 0.743068
| 0.983364
| 0.310536
| 0.310536
| 0.310536
| 0.310536
| 0.310536
| 0.310536
| 0
| 0.45186
| 0.201514
| 3,434
| 58
| 64
| 59.206897
| 0.337345
| 0.039313
| 0
| 0.245283
| 1
| 0
| 0
| 0
| 0
| 0
| 0.621548
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
58a65e7ca457f1c3d572d9af2717b08e68f34a49
| 200
|
py
|
Python
|
featuretools/primitives/standard/api.py
|
sandutsar/featuretools
|
4508dc71c8da981a0cf652b1f09fede48bf263af
|
[
"BSD-3-Clause"
] | null | null | null |
featuretools/primitives/standard/api.py
|
sandutsar/featuretools
|
4508dc71c8da981a0cf652b1f09fede48bf263af
|
[
"BSD-3-Clause"
] | 21
|
2021-10-15T00:42:29.000Z
|
2021-12-28T22:00:47.000Z
|
featuretools/primitives/standard/api.py
|
sandutsar/featuretools
|
4508dc71c8da981a0cf652b1f09fede48bf263af
|
[
"BSD-3-Clause"
] | null | null | null |
# flake8: noqa
from .aggregation_primitives import *
from .binary_transform import *
from .cum_transform_feature import *
from .rolling_transform_primitive import *
from .transform_primitive import *
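The effect of these star re-exports is that every primitive class becomes importable from this one module; a hedged usage sketch, assuming the package is installed:
from featuretools.primitives.standard.api import *  # noqa: F403 -- everything at once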
| 28.571429
| 42
| 0.825
| 24
| 200
| 6.583333
| 0.5
| 0.253165
| 0.303797
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00565
| 0.115
| 200
| 6
| 43
| 33.333333
| 0.887006
| 0.06
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5441a14ec0fe79eaeeea73221fd019d58e343c3e
| 4,210
|
py
|
Python
|
10.66. Sorted.py
|
kyumiouchi/python-basic-to-advanced
|
a774085a5d0e0bfed90098e0f27c8fb7b760d9a7
|
[
"Apache-2.0"
] | null | null | null |
10.66. Sorted.py
|
kyumiouchi/python-basic-to-advanced
|
a774085a5d0e0bfed90098e0f27c8fb7b760d9a7
|
[
"Apache-2.0"
] | null | null | null |
10.66. Sorted.py
|
kyumiouchi/python-basic-to-advanced
|
a774085a5d0e0bfed90098e0f27c8fb7b760d9a7
|
[
"Apache-2.0"
] | null | null | null |
"""
Sorted
Note: it is different from list.sort(), which sorts a list in place.
sorted() can sort any iterable.
Note: sorted() ALWAYS returns a LIST.
list_number = [1, 4, 6, 2, 1]
print(list_number) # [1, 4, 6, 2, 1]
print(sorted(list_number)) # [1, 1, 2, 4, 6]
print(list_number) # [1, 4, 6, 2, 1]
set_number = {1, 4, 6, 2, 1}
print(sorted(set_number)) # [1, 2, 4, 6]
print(sorted(list_number, reverse=True)) # [6, 4, 2, 1, 1]
print(tuple(sorted(list_number, reverse=True))) # (6, 4, 2, 1, 1)
print(set(sorted(list_number, reverse=True))) # {1, 2, 4, 6}
users = [
{"username": "samuel", "tweet": ["I love cakes", "I love pizza"]},
{"username": "carlla", "tweet": ["I love cats"]},
{"username": "jeff", "tweet": []},
{"username": "bob", "tweet": []},
{"username": "goggo", "tweet": ["I love dogs", "I am going out today"]},
{"username": "gal", "tweet": []}
]
print(users)
# [{'username': 'samuel', 'tweet': ['I love cakes', 'I love pizza']}, {'username': 'carlla', 'tweet': ['I love
# cats']}, {'username': 'jeff', 'tweet': []}, {'username': 'bob', 'tweet': []}, {'username': 'goggo', 'tweet': ['I
# love dogs', 'I am going out today']}, {'username': 'gal', 'tweet': []}]
# print(sorted(users))  # TypeError: '<' not supported between instances of 'dict' and 'dict'
print(sorted(users, key=len))
# [{'username': 'samuel', 'tweet': ['I love cakes', 'I love pizza']}, {'username': 'carlla', 'tweet': ['I love
# cats']}, {'username': 'jeff', 'tweet': []}, {'username': 'bob', 'tweet': []}, {'username': 'goggo', 'tweet': ['I
# love dogs', 'I am going out today']}, {'username': 'gal', 'tweet': []}]
users = [
{"username": "samuel", "tweet": ["I love cakes", "I love pizza"]},
{"username": "carlla", "tweet": ["I love cats"]},
{"username": "jeff", "tweet": []},
{"username": "bob", "tweet": [], "cor": "yellow"},
{"username": "goggo", "tweet": ["I love dogs", "I am going out today"]},
{"username": "gal", "tweet": [], "cor": "black", "music": "Rock"}
]
print(sorted(users, key=len))  # now the dicts have 2, 3 and 4 keys, so key=len reorders them
# [{'username': 'samuel', 'tweet': ['I love cakes', 'I love pizza']}, {'username': 'carlla', 'tweet': ['I love
# cats']}, {'username': 'jeff', 'tweet': []}, {'username': 'goggo', 'tweet': ['I love dogs', 'I am going out
# today']}, {'username': 'bob', 'tweet': [], 'cor': 'yellow'}, {'username': 'gal', 'tweet': [], 'cor': 'black',
# 'music': 'Rock'}]
print(sorted(users, key=lambda user: user["username"]))
# [{'username': 'bob', 'tweet': [], 'cor': 'yellow'}, {'username': 'carlla', 'tweet': ['I love cats']}, {'username':
# 'gal', 'tweet': [], 'cor': 'black', 'music': 'Rock'}, {'username': 'goggo', 'tweet': ['I love dogs', 'I am going
# out today']}, {'username': 'jeff', 'tweet': []}, {'username': 'samuel', 'tweet': ['I love cakes', 'I love pizza']}]
print(sorted(users, key=lambda user: user["tweet"]))
# [{'username': 'jeff', 'tweet': []}, {'username': 'bob', 'tweet': [], 'cor': 'yellow'}, {'username': 'gal',
# 'tweet': [], 'cor': 'black', 'music': 'Rock'}, {'username': 'samuel', 'tweet': ['I love cakes', 'I love pizza']},
# {'username': 'carlla', 'tweet': ['I love cats']}, {'username': 'goggo', 'tweet': ['I love dogs', 'I am going out
# today']}]
print(sorted(users, key=lambda user: len(user["tweet"])))
# [{'username': 'jeff', 'tweet': []}, {'username': 'bob', 'tweet': [], 'cor': 'yellow'}, {'username': 'gal',
# 'tweet': [], 'cor': 'black', 'music': 'Rock'}, {'username': 'carlla', 'tweet': ['I love cats']}, {'username':
# 'samuel', 'tweet': ['I love cakes', 'I love pizza']}, {'username': 'goggo', 'tweet': ['I love dogs', 'I am going
# out today']}]
"""
musics = [
{"title": "Thunderstruck", "sing": 3},
{"title": "Dead Bla", "sing": 2},
{"title": "Back Bla", "sing": 4},
{"title": "Too old bla", "sing": 32},
]
print(sorted(musics, key=lambda music: music['sing']))
# [{'title': 'Dead Bla', 'sing': 2}, {'title': 'Thunderstruck', 'sing': 3}, {'title': 'Back Bla', 'sing': 4},
# {'title': 'Too old bla', 'sing': 32}]
print(sorted(musics, key=lambda music: music['sing'], reverse=True))
# [{'title': 'Too old bla', 'sing': 32}, {'title': 'Back Bla', 'sing': 4}, {'title': 'Thunderstruck', 'sing': 3},
# {'title': 'Dead Bla', 'sing': 2}]
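A variant worth adding here (not in the original file): `operator.itemgetter` expresses the same key functions as the lambdas above and avoids re-typing the lookup.
from operator import itemgetter

print(sorted(musics, key=itemgetter('sing')))
# same ordering as key=lambda music: music['sing']
print(sorted(musics, key=itemgetter('sing'), reverse=True))
# same ordering as the reverse=True lambda version above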
| 47.303371
| 117
| 0.551306
| 543
| 4,210
| 4.257827
| 0.132597
| 0.069204
| 0.103806
| 0.069204
| 0.901384
| 0.865484
| 0.816609
| 0.777249
| 0.724913
| 0.66955
| 0
| 0.016292
| 0.154394
| 4,210
| 88
| 118
| 47.840909
| 0.633146
| 0.924941
| 0
| 0
| 0
| 0
| 0.27541
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b7294ce8bdb28d236e08baeaf6d0bfce75f03786
| 14,119
|
py
|
Python
|
nominals/migrations/0001_initial.py
|
rossm6/accounts
|
74633ce4038806222048d85ef9dfe97a957a6a71
|
[
"MIT"
] | 11
|
2021-01-23T01:09:54.000Z
|
2021-01-25T07:16:30.000Z
|
nominals/migrations/0001_initial.py
|
rossm6/accounts
|
74633ce4038806222048d85ef9dfe97a957a6a71
|
[
"MIT"
] | 7
|
2021-04-06T18:19:10.000Z
|
2021-09-22T19:45:03.000Z
|
nominals/migrations/0001_initial.py
|
rossm6/accounts
|
74633ce4038806222048d85ef9dfe97a957a6a71
|
[
"MIT"
] | 3
|
2021-01-23T18:55:32.000Z
|
2021-02-16T17:47:59.000Z
|
# Generated by Django 3.1.3 on 2021-01-01 15:00
import accountancy.fields
import accountancy.mixins
import accountancy.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
import nominals.models
import simple_history.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('controls', '0001_initial'),
('vat', '__first__'),
]
operations = [
migrations.CreateModel(
name='Nominal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('type', models.CharField(blank=True, choices=[('pl', 'profit and loss'), ('b', 'balance sheet')], max_length=2, null=True)),
('lft', models.PositiveIntegerField(editable=False)),
('rght', models.PositiveIntegerField(editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(editable=False)),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='nominals.nominal')),
],
bases=(accountancy.mixins.AuditMixin, models.Model),
),
migrations.CreateModel(
name='NominalHeader',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ref', models.CharField(max_length=20)),
('goods', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('discount', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('vat', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('total', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('paid', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('due', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('date', models.DateField()),
('due_date', models.DateField(blank=True, null=True)),
('status', models.CharField(choices=[('c', 'cleared'), ('v', 'void')], default='c', max_length=2)),
('created', models.DateTimeField(auto_now_add=True)),
('type', models.CharField(choices=[('nj', 'Journal')], max_length=2)),
('vat_type', models.CharField(blank=True, choices=[('i', 'Input'), ('o', 'Output')], max_length=2, null=True)),
('period', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='controls.period')),
],
options={
'permissions': [('view_transactions_enquiry', 'Can view transactions'), ('view_trial_balance_report', 'Can view trial balance report'), ('create_journal_transaction', 'Can create journal'), ('edit_journal_transaction', 'Can edit journal'), ('view_journal_transaction', 'Can view journal'), ('void_journal_transaction', 'Can void journal')],
},
bases=(nominals.models.ModuleTransactions, accountancy.mixins.AuditMixin, accountancy.models.TransactionBase, models.Model),
),
migrations.CreateModel(
name='NominalTransaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('module', models.CharField(max_length=3)),
('header', models.PositiveIntegerField()),
('line', models.PositiveIntegerField()),
('ref', models.CharField(max_length=100)),
('date', models.DateField()),
('created', models.DateTimeField(auto_now=True)),
('field', models.CharField(choices=[('g', 'Goods'), ('v', 'Vat'), ('t', 'Total')], max_length=2)),
('type', models.CharField(choices=[('pp', 'Payment'), ('pr', 'Refund'), ('pi', 'Invoice'), ('pc', 'Credit Note'), ('nj', 'Journal'), ('sp', 'Receipt'), ('sr', 'Refund'), ('si', 'Invoice'), ('sc', 'Credit Note'), ('cp', 'Payment'), ('cr', 'Receipt'), ('nbf', 'Year End Brought Forward')], max_length=10)),
('value', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
('nominal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nominals.nominal')),
('period', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='controls.period')),
],
),
migrations.CreateModel(
name='NominalLine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('line_no', models.IntegerField()),
('description', models.CharField(max_length=100)),
('goods', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('vat', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('type', models.CharField(choices=[('nj', 'Journal')], max_length=3)),
('goods_nominal_transaction', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='nominal_good_line', to='nominals.nominaltransaction')),
('header', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nominals.nominalheader')),
('nominal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nominals.nominal')),
('vat_code', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='vat.vat', verbose_name='Vat Code')),
('vat_nominal_transaction', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='nominal_vat_line', to='nominals.nominaltransaction')),
('vat_transaction', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='nominal_line_vat_transaction', to='vat.vattransaction')),
],
options={
'abstract': False,
},
bases=(nominals.models.ModuleTransactions, accountancy.mixins.AuditMixin, accountancy.models.TransactionBase, models.Model),
),
migrations.CreateModel(
name='HistoricalNominalLine',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('line_no', models.IntegerField()),
('description', models.CharField(max_length=100)),
('goods', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('vat', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('type', models.CharField(choices=[('nj', 'Journal')], max_length=3)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('goods_nominal_transaction', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='nominals.nominaltransaction')),
('header', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='nominals.nominalheader')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('nominal', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='nominals.nominal')),
('vat_code', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='vat.vat', verbose_name='Vat Code')),
('vat_nominal_transaction', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='nominals.nominaltransaction')),
('vat_transaction', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='vat.vattransaction')),
],
options={
'verbose_name': 'historical nominal line',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalNominalHeader',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('ref', models.CharField(max_length=20)),
('goods', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('discount', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('vat', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('total', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('paid', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('due', accountancy.fields.UIDecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
('date', models.DateField()),
('due_date', models.DateField(blank=True, null=True)),
('status', models.CharField(choices=[('c', 'cleared'), ('v', 'void')], default='c', max_length=2)),
('created', models.DateTimeField(blank=True, editable=False)),
('type', models.CharField(choices=[('nj', 'Journal')], max_length=2)),
('vat_type', models.CharField(blank=True, choices=[('i', 'Input'), ('o', 'Output')], max_length=2, null=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('period', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='controls.period')),
],
options={
'verbose_name': 'historical nominal header',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='HistoricalNominal',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('type', models.CharField(blank=True, choices=[('pl', 'profit and loss'), ('b', 'balance sheet')], max_length=2, null=True)),
('lft', models.PositiveIntegerField(editable=False)),
('rght', models.PositiveIntegerField(editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(editable=False)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('parent', mptt.fields.TreeForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='nominals.nominal')),
],
options={
'verbose_name': 'historical nominal',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.AddConstraint(
model_name='nominaltransaction',
constraint=models.UniqueConstraint(fields=('module', 'header', 'line', 'field'), name='nominal_unique_batch'),
),
migrations.AddConstraint(
model_name='nominal',
constraint=models.UniqueConstraint(fields=('name', 'parent'), name='nominal_unique'),
),
]
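The Historical* tables created above are what django-simple-history writes to. Assuming the concrete models in nominals/models.py are registered with a `HistoricalRecords()` field (implied by this migration but not shown here), past versions can be read back through the standard `history` manager; a sketch for a configured Django project:
from nominals.models import Nominal

nominal = Nominal.objects.get(name="Sales")      # "Sales" is a made-up example
for record in nominal.history.all():             # ordered (-history_date, -history_id)
    print(record.history_date, record.history_type, record.history_user)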
| 73.15544
| 356
| 0.62207
| 1,486
| 14,119
| 5.741588
| 0.127187
| 0.040319
| 0.036099
| 0.056728
| 0.804501
| 0.778598
| 0.766878
| 0.764416
| 0.764416
| 0.756798
| 0
| 0.010295
| 0.215738
| 14,119
| 192
| 357
| 73.536458
| 0.760228
| 0.003187
| 0
| 0.654054
| 1
| 0
| 0.164227
| 0.037735
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.048649
| 0
| 0.07027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b73629b20f1372f099b9739bfedbbb6f92bd1844
| 3,210
|
py
|
Python
|
python/graphscope/nx/algorithms/tests/forward/test_approximation.py
|
lnfjpt/GraphScope
|
917146f86d8387302a2e1de6963115e7568bf3ee
|
[
"Apache-2.0"
] | 1
|
2021-12-30T02:55:16.000Z
|
2021-12-30T02:55:16.000Z
|
python/graphscope/nx/algorithms/tests/forward/test_approximation.py
|
lnfjpt/GraphScope
|
917146f86d8387302a2e1de6963115e7568bf3ee
|
[
"Apache-2.0"
] | null | null | null |
python/graphscope/nx/algorithms/tests/forward/test_approximation.py
|
lnfjpt/GraphScope
|
917146f86d8387302a2e1de6963115e7568bf3ee
|
[
"Apache-2.0"
] | null | null | null |
import networkx.algorithms.approximation.tests.test_approx_clust_coeff
import networkx.algorithms.approximation.tests.test_clique
import networkx.algorithms.approximation.tests.test_connectivity
import networkx.algorithms.approximation.tests.test_distance_measures
import networkx.algorithms.approximation.tests.test_dominating_set
import networkx.algorithms.approximation.tests.test_kcomponents
import networkx.algorithms.approximation.tests.test_matching
import networkx.algorithms.approximation.tests.test_maxcut
import networkx.algorithms.approximation.tests.test_ramsey
import networkx.algorithms.approximation.tests.test_steinertree
import networkx.algorithms.approximation.tests.test_traveling_salesman
import networkx.algorithms.approximation.tests.test_treewidth
import pytest
from graphscope.nx.utils.compat import import_as_graphscope_nx
import_as_graphscope_nx(networkx.algorithms.approximation.tests.test_approx_clust_coeff,
decorators=pytest.mark.usefixtures("graphscope_session"))
import_as_graphscope_nx(networkx.algorithms.approximation.tests.test_clique,
decorators=pytest.mark.usefixtures("graphscope_session"))
import_as_graphscope_nx(networkx.algorithms.approximation.tests.test_connectivity,
decorators=pytest.mark.usefixtures("graphscope_session"))
import_as_graphscope_nx(networkx.algorithms.approximation.tests.test_distance_measures,
decorators=pytest.mark.usefixtures("graphscope_session"))
import_as_graphscope_nx(networkx.algorithms.approximation.tests.test_dominating_set,
decorators=pytest.mark.usefixtures("graphscope_session"))
import_as_graphscope_nx(networkx.algorithms.approximation.tests.test_kcomponents,
decorators=pytest.mark.usefixtures("graphscope_session"))
import_as_graphscope_nx(networkx.algorithms.approximation.tests.test_matching,
decorators=pytest.mark.usefixtures("graphscope_session"))
import_as_graphscope_nx(networkx.algorithms.approximation.tests.test_maxcut,
decorators=pytest.mark.usefixtures("graphscope_session"))
import_as_graphscope_nx(networkx.algorithms.approximation.tests.test_ramsey,
decorators=pytest.mark.usefixtures("graphscope_session"))
import_as_graphscope_nx(networkx.algorithms.approximation.tests.test_steinertree,
decorators=pytest.mark.usefixtures("graphscope_session"))
import_as_graphscope_nx(networkx.algorithms.approximation.tests.test_traveling_salesman,
decorators=pytest.mark.usefixtures("graphscope_session"))
import_as_graphscope_nx(networkx.algorithms.approximation.tests.test_treewidth,
decorators=pytest.mark.usefixtures("graphscope_session"))
@pytest.mark.usefixtures("graphscope_session")
@pytest.mark.skip(reason="Too slow")
def test_example_1():
    pass
@pytest.mark.usefixtures("graphscope_session")
@pytest.mark.skip(reason="Too slow")
def test_example_1_detail_3_and_4():
    pass
@pytest.mark.usefixtures("graphscope_session")
@pytest.mark.skip(reason="Too slow")
def test_torrents_and_ferraro_graph():
    pass
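Since the twelve `import_as_graphscope_nx` calls differ only in the module argument, the same wiring can be expressed as a loop; an equivalent refactor sketch:
_test_modules = [
    networkx.algorithms.approximation.tests.test_approx_clust_coeff,
    networkx.algorithms.approximation.tests.test_clique,
    networkx.algorithms.approximation.tests.test_connectivity,
    networkx.algorithms.approximation.tests.test_distance_measures,
    networkx.algorithms.approximation.tests.test_dominating_set,
    networkx.algorithms.approximation.tests.test_kcomponents,
    networkx.algorithms.approximation.tests.test_matching,
    networkx.algorithms.approximation.tests.test_maxcut,
    networkx.algorithms.approximation.tests.test_ramsey,
    networkx.algorithms.approximation.tests.test_steinertree,
    networkx.algorithms.approximation.tests.test_traveling_salesman,
    networkx.algorithms.approximation.tests.test_treewidth,
]
for _mod in _test_modules:
    import_as_graphscope_nx(
        _mod, decorators=pytest.mark.usefixtures("graphscope_session"))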
| 46.521739
| 88
| 0.795327
| 352
| 3,210
| 6.96875
| 0.139205
| 0.176111
| 0.303302
| 0.352222
| 0.958011
| 0.958011
| 0.737872
| 0.635956
| 0.606604
| 0.582144
| 0
| 0.001418
| 0.121495
| 3,210
| 69
| 89
| 46.521739
| 0.86844
| 0
| 0
| 0.42
| 0
| 0
| 0.09156
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06
| true
| 0.06
| 0.52
| 0
| 0.58
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 9
|
3fd136ae287f5ffa884a9fd1f81002afedf40c9c
| 1,897
|
py
|
Python
|
integration-tests/train_command.py
|
luispedro/SemiBin
|
7a5c9c68bb29ec27b64d7b34ed88a2eab921314b
|
[
"MIT"
] | 25
|
2021-05-19T15:38:30.000Z
|
2022-03-18T09:28:32.000Z
|
integration-tests/train_command.py
|
luispedro/SemiBin
|
7a5c9c68bb29ec27b64d7b34ed88a2eab921314b
|
[
"MIT"
] | 39
|
2021-05-12T05:22:26.000Z
|
2022-03-31T13:28:46.000Z
|
integration-tests/train_command.py
|
luispedro/SemiBin
|
7a5c9c68bb29ec27b64d7b34ed88a2eab921314b
|
[
"MIT"
] | 5
|
2021-03-15T23:08:00.000Z
|
2021-05-07T07:31:03.000Z
|
import os
import pandas as pd
### Input fa
os.system('SemiBin train --data test/train_data/data.csv --data-split test/train_data/data_split.csv -c test/train_data/cannot.txt --epoches 1 --batch-size 2048 --mode single -i test/train_data/input.fasta -o output_train_fa -m 2500 --ratio 0.05 -p 1')
assert os.path.exists('output_train_fa/model.h5')
### Input .gz
os.system('SemiBin train --data test/train_data/data.csv --data-split test/train_data/data_split.csv -c test/train_data/cannot.txt --epoches 1 --batch-size 2048 --mode single -i test/train_data/input.fasta.gz -o output_train_gz -m 2500 --ratio 0.05 -p 1')
assert os.path.exists('output_train_gz/model.h5')
### Input .bz2
os.system('SemiBin train --data test/train_data/data.csv --data-split test/train_data/data_split.csv -c test/train_data/cannot.txt --epoches 1 --batch-size 2048 --mode single -i test/train_data/input.fasta.bz2 -o output_train_bz2 -m 2500 --ratio 0.05 -p 1')
assert os.path.exists('output_train_bz2/model.h5')
### Input .xz
os.system('SemiBin train --data test/train_data/data.csv --data-split test/train_data/data_split.csv -c test/train_data/cannot.txt --epoches 1 --batch-size 2048 --mode single -i test/train_data/input.fasta.xz -o output_train_xz -m 2500 --ratio 0.05 -p 1')
assert os.path.exists('output_train_xz/model.h5')
### train several samples
os.system('SemiBin train --data test/train_data/data.csv test/train_data/data.csv test/train_data/data.csv --data-split test/train_data/data_split.csv test/train_data/data_split.csv test/train_data/data_split.csv -c test/train_data/cannot.txt test/train_data/cannot.txt test/train_data/cannot.txt --epoches 1 --batch-size 2048 --mode several -i test/train_data/input.fasta.xz test/train_data/input.fasta.xz test/train_data/input.fasta.xz -o output_train_several_xz -m 2500 --ratio 0.05 -p 1')
assert os.path.exists('output_train_several_xz/model.h5')
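The four single-sample invocations differ only in the input suffix and output directory; an equivalent sketch that keeps the command in one place and uses `subprocess` so a failing run raises instead of passing silently:
import subprocess

base = ('SemiBin train --data test/train_data/data.csv '
        '--data-split test/train_data/data_split.csv '
        '-c test/train_data/cannot.txt --epoches 1 --batch-size 2048 '
        '--mode single -m 2500 --ratio 0.05 -p 1')
for suffix, tag in [('', 'fa'), ('.gz', 'gz'), ('.bz2', 'bz2'), ('.xz', 'xz')]:
    cmd = f'{base} -i test/train_data/input.fasta{suffix} -o output_train_{tag}'
    subprocess.run(cmd, shell=True, check=True)
    assert os.path.exists(f'output_train_{tag}/model.h5')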
| 67.75
| 492
| 0.766473
| 352
| 1,897
| 3.96875
| 0.125
| 0.212598
| 0.260558
| 0.170365
| 0.866142
| 0.866142
| 0.866142
| 0.865426
| 0.865426
| 0.846815
| 0
| 0.042849
| 0.089615
| 1,897
| 28
| 493
| 67.75
| 0.766068
| 0.032156
| 0
| 0
| 0
| 0.416667
| 0.864909
| 0.505217
| 0
| 0
| 0
| 0
| 0.416667
| 1
| 0
| true
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 14
|
3fdaaf316d0c67195452a7174038bbed47fc519f
| 200,288
|
py
|
Python
|
notebooks/conservative_LMM.py
|
ranocha/Relaxation-LMM-notebooks
|
87761ba09cd83b754486a796e3942c29a8f87f2d
|
[
"MIT"
] | null | null | null |
notebooks/conservative_LMM.py
|
ranocha/Relaxation-LMM-notebooks
|
87761ba09cd83b754486a796e3942c29a8f87f2d
|
[
"MIT"
] | null | null | null |
notebooks/conservative_LMM.py
|
ranocha/Relaxation-LMM-notebooks
|
87761ba09cd83b754486a796e3942c29a8f87f2d
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.optimize import root, fsolve, newton, brentq, bisect
def compute_eocs(dts, errors):
eocs = np.zeros(len(errors) - 1)
for i in np.arange(len(errors) - 1):
eocs[i] = np.log(errors[i+1] / errors[i]) / np.log(dts[i+1] / dts[i])
return eocs
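# Worked example (added for illustration): halving the step from dt=0.1 to
# dt=0.05 while the error drops from 1e-2 to 2.5e-3 gives
#     log(2.5e-3 / 1e-2) / log(0.05 / 0.1) = log(0.25) / log(0.5) = 2,
# i.e. second-order convergence.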
def compute_eoc(dts_, errors_):
dts = np.array(dts_)
errors = np.array(errors_)
idx = ~np.isnan(np.array(errors))
if np.any(idx):
return np.mean(compute_eocs(dts[idx], errors[idx]))
else:
return np.nan
def etaL2(u):
"""
The standard inner product norm (L^2) entropy.
"""
return 0.5 * np.dot(u, u)
def detaL2(u):
"""
The derivative of the standard inner product norm (L^2) entropy.
"""
return u
def compute_single_result(f, u, t_final, dt, scheme, num_steps, **kwargs):
"""
Compute the numerical solution obtained by the LMM `scheme` which
uses `num_steps` previous step/derivative values for the ODE
given by the right hand side `f` with analytical solution or
starting procedure `u` and a time step `dt`.
"""
t0 = 0.
u0 = u(t0)
t1 = dt
u1 = u(t1)
t2 = 2*dt
u2 = u(t2)
t3 = 3*dt
u3 = u(t3)
t4 = 4*dt
u4 = u(t4)
if num_steps == 2:
tt, uu, gamma = scheme(f, t_final, t0, u0, t1, u1,
return_gamma=True, **kwargs)
elif num_steps == 3:
tt, uu, gamma = scheme(f, t_final, t0, u0, t1, u1, t2, u2,
return_gamma=True, **kwargs)
elif num_steps == 4:
tt, uu, gamma = scheme(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
return_gamma=True, **kwargs)
elif num_steps == 5:
tt, uu, gamma = scheme(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3, t4, u4,
return_gamma=True, **kwargs)
else:
raise Exception("num_steps == %d not implemented yet." % (num_steps))
return tt, uu, gamma
def compute_convergence_data(f, u, t_final, dts, scheme, num_steps,
error_idx=None, fixed_coefficients_twice=False,
**kwargs):
"""
Compute the numerical errors obtained by the LMM `scheme` which
uses `num_steps` previous step/derivative values for the ODE
given by the right hand side `f` with analytical solution or
starting procedure `u` and time steps `dts`.
"""
error_b = []
gammaM1_b = []
error_p = []
gammaM1_p = []
error_rf = []
gammaM1_rf = []
error_rff = []
gammaM1_rff = []
error_ra = []
gammaM1_ra = []
error_idt = []
gammaM1_idt = []
for dt in dts:
try:
tt, uu, gamma = compute_single_result(f, u, t_final, dt, scheme, num_steps,
projection=False, relaxation=False,
adapt_dt=False, adapt_coefficients=False,
**kwargs)
if error_idx is None:
error_b.append( np.linalg.norm(uu[-1] - u(tt[-1])) )
else:
error_b.append( np.linalg.norm(uu[-1][error_idx] - u(tt[-1])[error_idx]) )
gammaM1_b.append( np.linalg.norm(gamma - 1, ord=np.inf) )
except Exception:
error_b.append(np.nan)
gammaM1_b.append(np.nan)
try:
tt, uu, gamma = compute_single_result(f, u, t_final, dt, scheme, num_steps,
projection=True, relaxation=False,
adapt_dt=False, adapt_coefficients=False,
**kwargs)
if error_idx is None:
error_p.append( np.linalg.norm(uu[-1] - u(tt[-1])) )
else:
error_p.append( np.linalg.norm(uu[-1][error_idx] - u(tt[-1])[error_idx]) )
gammaM1_p.append( np.linalg.norm(gamma - 1, ord=np.inf) )
except Exception:
error_p.append(np.nan)
gammaM1_p.append(np.nan)
try:
tt, uu, gamma = compute_single_result(f, u, t_final, dt, scheme, num_steps,
projection=False, relaxation=True,
adapt_dt=True, adapt_coefficients=False,
**kwargs)
if error_idx is None:
error_rf.append( np.linalg.norm(uu[-1] - u(tt[-1])) )
else:
error_rf.append( np.linalg.norm(uu[-1][error_idx] - u(tt[-1])[error_idx]) )
gammaM1_rf.append( np.linalg.norm(gamma - 1, ord=np.inf) )
except Exception:
error_rf.append(np.nan)
gammaM1_rf.append(np.nan)
if fixed_coefficients_twice:
try:
tt, uu, gamma = compute_single_result(f, u, t_final, dt, scheme, num_steps,
projection=False, relaxation=True,
adapt_dt=True, adapt_coefficients=False,
fixed_coefficient_fix=True,
**kwargs)
if error_idx is None:
error_rff.append( np.linalg.norm(uu[-1] - u(tt[-1])) )
else:
error_rff.append( np.linalg.norm(uu[-1][error_idx] - u(tt[-1])[error_idx]) )
gammaM1_rff.append( np.linalg.norm(gamma - 1, ord=np.inf) )
except Exception:
error_rff.append(np.nan)
gammaM1_rff.append(np.nan)
try:
tt, uu, gamma = compute_single_result(f, u, t_final, dt, scheme, num_steps,
projection=False, relaxation=True,
adapt_dt=True, adapt_coefficients=True,
**kwargs)
if error_idx is None:
error_ra.append( np.linalg.norm(uu[-1] - u(tt[-1])) )
else:
error_ra.append( np.linalg.norm(uu[-1][error_idx] - u(tt[-1])[error_idx]) )
gammaM1_ra.append( np.linalg.norm(gamma - 1, ord=np.inf) )
except Exception:
error_ra.append(np.nan)
gammaM1_ra.append(np.nan)
try:
tt, uu, gamma = compute_single_result(f, u, t_final, dt, scheme, num_steps,
projection=False, relaxation=True,
adapt_dt=False, adapt_coefficients=False,
**kwargs)
if error_idx is None:
error_idt.append( np.linalg.norm(uu[-1] - u(tt[-1])) )
else:
error_idt.append( np.linalg.norm(uu[-1][error_idx] - u(tt[-1])[error_idx]) )
gammaM1_idt.append( np.linalg.norm(gamma - 1, ord=np.inf) )
except Exception:
error_idt.append(np.nan)
gammaM1_idt.append(np.nan)
if fixed_coefficients_twice:
return error_b, gammaM1_b, error_p, gammaM1_p, error_rf, gammaM1_rf, error_rff, gammaM1_rff, error_ra, gammaM1_ra, error_idt, gammaM1_idt
else:
return error_b, gammaM1_b, error_p, gammaM1_p, error_rf, gammaM1_rf, error_ra, gammaM1_ra, error_idt, gammaM1_idt
class SolveForGammaException(BaseException):
def __init__(self, message, data):
self.message = message
self.data = data
def conservative_relaxation_solve(eta, deta, u_old, eta_old, u_new, old_gamma, method, tol, maxiter):
"""
Compute the relaxation factor `gamma` for a step from `u_old` to `u_new`
and the invariant `eta` with derivative `deta`.
The initial guess of `gamma` is `old_gamma` and the solution is obtained
by `method` using the tolerance `tol` and not more than `maxiter` iterations.
Possible `method`s are
- "newton"
- "simplified Newton"
- "brentq"
- "bisect"
- "hybr"
- "lm"
- "broyden1"
- "broyden2"
- "anderson"
- "linearmixing"
- "diagbroyden"
- "excitingmixing"
- "krylov"
- "df-sane"
"""
if eta == etaL2:
# assume eta == squared Euclidean inner product
# gamma = -2 * np.dot(u_old, u_new - u_old) / np.dot(u_new - u_old, u_new - u_old)
a = eta(u_old) - eta_old
b = np.dot(u_old, u_new - u_old)
c = eta(u_new - u_old)
if np.abs(a) < 1.0e-14:
gamma = -b / c
else:
gamma = (-b + np.sqrt(b*b - 4*a*c)) / (2*c)
return gamma
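# Why the quadratic above (illustrative note, not in the original): with
# eta(u) = 0.5 * <u, u> and d = u_new - u_old,
#     eta(u_old + gamma*d) - eta_old
#       = [eta(u_old) - eta_old] + gamma * <u_old, d> + gamma**2 * eta(d)
#       = a + b*gamma + c*gamma**2,
# so r(gamma) = 0 is solved by the root (-b + sqrt(b*b - 4*a*c)) / (2*c);
# when a ~ 0 the nonzero root of b*gamma + c*gamma**2 = 0, i.e. -b/c, is used.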
r = lambda gamma: eta(u_old + gamma * (u_new - u_old)) - eta_old
if method == "newton":
gamma = newton(r, old_gamma, tol=tol, maxiter=maxiter)
success = True
msg = "Newton method did not converge"
elif method == "simplified Newton":
eta_prime = deta(u_new)
denominator = np.dot(eta_prime, u_new - u_old)
gamma = old_gamma
delta_gamma = 10. * tol
iter = 0
val = r(gamma)
while np.abs(val) > tol and iter < maxiter:
delta_gamma = -val / denominator
gamma += delta_gamma
iter += 1
val = r(gamma)
u_new = u_old + gamma * (u_new - u_old)
success = iter < maxiter
msg = "'simplified Newton' method did not converge"
elif method == "brentq" or method == "bisect":
left = 0.9 * old_gamma
right = 1.1 * old_gamma
left_right_iter = 0
while r(left) * r(right) > 0:
left *= 0.9
right *= 1.1
left_right_iter += 1
if left_right_iter > 100:
raise SolveForGammaException(
"No suitable bounds found after %d iterations.\nLeft = %e; r(left) = %e\nRight = %e; r(right) = %e\n"%(
left_right_iter, left, r(left), right, r(right)),
u_old)
if method == "brentq":
gamma = brentq(r, left, right, xtol=tol, maxiter=maxiter)
else:
gamma = bisect(r, left, right, xtol=tol, maxiter=maxiter)
success = True
msg = "%s method did not converge"%method
else:
# Possible methods:
# hybr, lm, broyden1, broyden2, anderson, linearmixing, diagbroyden
# excitingmixing, krylov, df-sane
# See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root.html
sol = root(r, old_gamma, method=method, tol=tol,
options={'xtol': tol, 'maxiter': maxiter})
gamma = np.sum(sol.x); success = sol.success; msg = sol.message
if not success:
print('Warning: fsolve did not converge.')
print(gamma)
print(msg)
if gamma <= 0:
print('Warning: gamma is negative.')
return gamma
def cons_or_diss_relaxation_solve(eta, deta, eta_est, u_old, eta_old, u_new, old_gamma, method, tol, maxiter):
"""
Compute the relaxation factor `gamma` for a step from `u_old` to `u_new`
and the general (conserved or dissipated) quantity of interest `eta`
with derivative `deta`. The previous value of `eta` is `eta_old`, the
desired estimate is `eta_est`.
The initial guess of `gamma` is `old_gamma` and the solution is obtained
by `method` using the tolerance `tol` and not more than `maxiter` iterations.
Possible `method`s are
- "newton"
- "simplified Newton"
- "brentq"
- "bisect"
- "hybr"
- "lm"
- "broyden1"
- "broyden2"
- "anderson"
- "linearmixing"
- "diagbroyden"
- "excitingmixing"
- "krylov"
- "df-sane"
"""
if eta == etaL2:
# assume eta == squared Euclidean inner product
# gamma = 2 * ( eta_est - eta_old - np.dot(u_old, u_new - u_old) ) / np.dot(u_new - u_old, u_new - u_old)
a = eta(u_old) - eta_old
b = np.dot(u_old, u_new - u_old) - eta_est + eta_old
c = eta(u_new - u_old)
if np.abs(a) < 1.0e-14:
gamma = -b / c
else:
gamma = (-b + np.sqrt(b*b - 4*a*c)) / (2*c)
return gamma
r = lambda gamma: eta(u_old + gamma * (u_new - u_old)) - eta_old - gamma * (eta_est - eta_old)
if method == "newton":
gamma = newton(r, old_gamma, tol=tol, maxiter=maxiter)
success = True
msg = "Newton method did not converge"
elif method == "simplified Newton":
eta_prime = deta(u_new)
denominator = np.dot(eta_prime, u_new - u_old) - (eta_est - eta_old)
gamma = old_gamma
delta_gamma = 10. * tol
iter = 0
val = r(gamma)
while np.abs(val) > tol and iter < maxiter:
delta_gamma = -val / denominator
gamma += delta_gamma
iter += 1
val = r(gamma)
u_new = u_old + gamma * (u_new - u_old)
success = iter < maxiter
msg = "'simplified Newton' method did not converge"
elif method == "brentq" or method == "bisect":
left = 0.9 * old_gamma
right = 1.1 * old_gamma
left_right_iter = 0
while r(left) * r(right) > 0:
left *= 0.9
right *= 1.1
left_right_iter += 1
if left_right_iter > 100:
raise SolveForGammaException(
"No suitable bounds found after %d iterations.\nLeft = %e; r(left) = %e\nRight = %e; r(right) = %e\n"%(
left_right_iter, left, r(left), right, r(right)),
u_old)
if method == "brentq":
gamma = brentq(r, left, right, xtol=tol, maxiter=maxiter)
else:
gamma = bisect(r, left, right, xtol=tol, maxiter=maxiter)
success = True
msg = "%s method did not converge"%method
else:
# Possible methods:
# hybr, lm, broyden1, broyden2, anderson, linearmixing, diagbroyden
# excitingmixing, krylov, df-sane
# See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.root.html
sol = root(r, old_gamma, method=method, tol=tol,
options={'xtol': tol, 'maxiter': maxiter})
gamma = np.sum(sol.x); success = sol.success; msg = sol.message
if not success:
print('Warning: fsolve did not converge.')
print(gamma)
print(msg)
if gamma <= 0:
print('Warning: gamma is negative.')
return gamma
def conservative_projection_solve(eta, deta, u_old, eta_old, u_new, method, tol, maxiter):
"""
Compute the projection factor `gamma` and the projected value for a step from
`u_old` to `u_new` and the invariant `eta` with derivative `deta`.
The solution is obtained by `method` using the tolerance `tol` and not more
than `maxiter` iterations. Possible `method`s are
- "simplified Newton"
"""
if eta == etaL2:
# assume eta == 1/2 * squared Euclidean inner product
factor = np.sqrt( eta_old / eta(u_new) )
gamma = factor
u_new = factor * u_new
return gamma, u_new
if method == "simplified Newton":
eta_prime = deta(u_new)
denominator = (np.dot(eta_prime, eta_prime) + 1.e-16)
gamma_p1 = 0.
delta_gamma = 10.
iter = 0
while delta_gamma > tol and iter < maxiter:
delta_gamma = -(eta(u_new + gamma_p1*eta_prime) - eta_old) / denominator
gamma_p1 += delta_gamma
iter += 1
gamma = gamma_p1 + 1.0
u_new = u_new + gamma_p1 * eta_prime
success = iter < maxiter
msg = "'simplified Newton' method did not converge"
else:
raise Exception("Method %s not implemented yet." % (method))
if not success:
print('Warning: fsolve did not converge.')
print(msg)
return gamma, u_new
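# A minimal check (illustration only): project a hypothetical quadratic
# functional back to its old value with the simplified Newton iteration;
# the fresh lambdas avoid the etaL2 closed-form shortcut.
def _check_conservative_projection():
    eta = lambda u: 0.5 * np.dot(u, u)
    deta = lambda u: u
    u_old = np.array([1.0, 0.0])
    u_new = np.array([0.9, 0.5])
    gamma, u_proj = conservative_projection_solve(eta, deta, u_old, eta(u_old),
                                                  u_new, "simplified Newton",
                                                  1.e-14, 100)
    return eta(u_proj) - eta(u_old)  # ~ roundoff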
def cons_or_diss_projection_solve(eta, deta, eta_est, u_old, eta_old, u_new, method, tol, maxiter):
"""
    Compute the projection factor `gamma` and the projected value for a step from
    `u_old` to `u_new`, enforcing the general (conserved or dissipated) quantity of
    interest `eta` with derivative `deta`. The previous value of `eta` is `eta_old`;
    the desired new value is the estimate `eta_est`.
The solution is obtained by `method` using the tolerance `tol` and not more
than `maxiter` iterations. Possible `method`s are
- "simplified Newton"
"""
if eta == etaL2:
# assume eta == 1/2 * squared Euclidean inner product
factor = np.sqrt( eta_est / eta(u_new) )
gamma = factor
u_new = factor * u_new
return gamma, u_new
if method == "simplified Newton":
eta_prime = deta(u_new)
denominator = (np.dot(eta_prime, eta_prime) + 1.e-16)
        gamma_p1 = 0.
        delta_gamma = 10.
        n_iter = 0
        while np.abs(delta_gamma) > tol and n_iter < maxiter:
            delta_gamma = -(eta(u_new + gamma_p1*eta_prime) - eta_est) / denominator
            gamma_p1 += delta_gamma
            n_iter += 1
        gamma = gamma_p1 + 1.0
        u_new = u_new + gamma_p1 * eta_prime
        success = n_iter < maxiter
msg = "'simplified Newton' method did not converge"
else:
raise Exception("Method %s not implemented yet." % (method))
    if not success:
        print('Warning: the projection solve did not converge.')
print(msg)
return gamma, u_new
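# Dissipative counterpart of the check above (illustration only): prescribe a
# strictly smaller estimate eta_est and project onto it.
def _check_dissipative_projection():
    eta = lambda u: 0.5 * np.dot(u, u)
    deta = lambda u: u
    u_old = np.array([1.0, 0.0])
    u_new = np.array([0.9, 0.5])
    eta_est = 0.95 * eta(u_old)  # enforce a 5 % decay of the functional
    gamma, u_proj = cons_or_diss_projection_solve(eta, deta, eta_est, u_old,
                                                  eta(u_old), u_new,
                                                  "simplified Newton", 1.e-14, 100)
    return eta(u_proj) - eta_est  # ~ roundoff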
def conservative_LMM(f, t_final, initial_t, initial_u,
fixed_step, adaptive_step,
idx_u_old=-1,
eta=etaL2, deta=detaL2,
return_gamma=False,
projection=False, relaxation=False,
adapt_dt=False, adapt_coefficients=False,
fixed_coefficient_fix=False,
method=None, tol=1.e-14, maxiter=10000, maxsteps=10**12):
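    """
    Generic driver for conservative linear multistep methods: integrate
    u' = f(u) from the starting values `initial_u` at times `initial_t` up to
    `t_final`, using `fixed_step` (constant step size coefficients) or
    `adaptive_step` (variable step size coefficients). The invariant `eta`
    with derivative `deta` is enforced either by `projection` or by
    `relaxation`; `idx_u_old` (an index, a list of weights, or a callable
    mapping `old_omega` to weights) selects the previous state used as base
    point of the projection/relaxation.
    """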
uu = [u for u in initial_u]
ff = [f(u) for u in initial_u]
tt = [t for t in initial_t]
if len(uu) != len(tt):
raise Exception("You must provide the same number of initial values for `t` and `u`.")
if len(uu) < 2:
raise Exception("You must provide at least 2 initial values for `t` and `u`.")
h = tt[1] - tt[0]
old_omega = [(tt[i+1] - tt[i]) / h for i in np.arange(len(tt)-1)]
old_gamma = [1.0 for i in np.arange(len(tt)-1)]
old_eta = [eta(uu[i]) for i in np.arange(len(uu))]
if relaxation and projection:
raise Exception("Use either relaxation or projection, not both.")
    if relaxation and method is None:
        method = "brentq"
    elif projection and method is None:
        method = "simplified Newton"
if callable(idx_u_old):
old_weights_func = idx_u_old
elif not hasattr(idx_u_old, '__iter__'):
old_weights = [0.0 for u in uu]
old_weights[idx_u_old] = 1.0
old_weights_func = lambda old_omega: old_weights
else:
old_weights_func = lambda old_omega: idx_u_old
t = tt[-1]
gammas = [1.0 for t in initial_t]
step = 0
while t < t_final and step < maxsteps:
step += 1
if relaxation and adapt_coefficients:
u_new = adaptive_step(uu, ff, h, old_omega)
else:
u_new = fixed_step(uu, ff, h)
old_weights = old_weights_func(old_omega)
u_old = sum(old_weights[idx]*uu[idx] for idx in np.arange(-len(old_weights), 0))
eta_old = sum(old_weights[idx]*old_eta[idx] for idx in np.arange(-len(old_weights), 0))
if projection:
gamma, u_new = conservative_projection_solve(eta, deta, u_old, eta_old, u_new, method, tol, maxiter)
elif relaxation:
gamma = conservative_relaxation_solve(eta, deta, u_old, eta_old, u_new, old_gamma[-1], method, tol, maxiter)
u_new = u_old + gamma * (u_new - u_old)
for i in np.arange(-len(old_gamma), -1):
old_gamma[i] = old_gamma[i+1]
old_gamma[-1] = gamma
else:
gamma = 1.0
if return_gamma:
gammas.append(gamma)
uu.append(u_new)
if relaxation and adapt_dt:
t_old = np.sum([old_weights[idx]*tt[idx] for idx in np.arange(-len(old_weights), 0)])
if fixed_coefficient_fix and not adapt_coefficients:
t_diff = -h * np.sum([idx*old_weights[idx] for idx in np.arange(-len(old_weights), 0)])
else:
t_diff = tt[-1] + h - t_old
t = t_old + gamma * t_diff
if adapt_coefficients:
# new_omega = -idx_u_old*gamma - np.sum([old_omega[i] for i in np.arange(-1, idx_u_old, -1)])
new_omega = (t - tt[-1]) / h
for i in np.arange(-len(old_omega), -1):
old_omega[i] = old_omega[i+1]
old_omega[-1] = new_omega
if gamma < 1.0e-14:
raise Exception("gamma = %.2e is too small in step %d!" % (gamma, step))
else:
t += h
tt.append(t)
for i in np.arange(-len(ff), -1):
ff[i] = ff[i+1]
ff[-1] = f(u_new)
for i in np.arange(-len(old_eta), -1):
old_eta[i] = old_eta[i+1]
old_eta[-1] = eta(u_new)
if return_gamma:
return np.array(tt), uu, np.array(gammas)
else:
return np.array(tt), uu
def cons_or_diss_LMM(f, t_final, initial_t, initial_u,
fixed_step, adaptive_step,
fixed_estimate, adaptive_estimate,
idx_u_old=-1,
eta=etaL2, deta=detaL2,
return_gamma=False,
projection=False, relaxation=False,
adapt_dt=False, adapt_coefficients=False,
fixed_coefficient_fix=False,
method=None, tol=1.e-14, maxiter=10000, maxsteps=10**12):
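    """
    Generic driver for linear multistep methods with a conserved or dissipated
    functional `eta`: as `conservative_LMM`, but the target value of `eta` in
    each step is the estimate computed by `fixed_estimate`/`adaptive_estimate`
    instead of the old value, so dissipated quantities are handled as well.
    """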
uu = [u for u in initial_u]
ff = [f(u) for u in initial_u]
tt = [t for t in initial_t]
if len(uu) != len(tt):
raise Exception("You must provide the same number of initial values for `t` and `u`.")
if len(uu) < 2:
raise Exception("You must provide at least 2 initial values for `t` and `u`.")
h = tt[1] - tt[0]
old_omega = [(tt[i+1] - tt[i]) / h for i in np.arange(len(tt)-1)]
old_gamma = [1.0 for i in np.arange(len(tt)-1)]
old_eta = [eta(uu[i]) for i in np.arange(len(uu))]
old_deta_f = [np.dot(deta(uu[i]), ff[i]) for i in np.arange(len(uu))]
if relaxation and projection:
raise Exception("Use either relaxation or projection, not both.")
    if relaxation and method is None:
        method = "brentq"
    elif projection and method is None:
        method = "simplified Newton"
if callable(idx_u_old):
old_weights_func = idx_u_old
elif not hasattr(idx_u_old, '__iter__'):
old_weights = [0.0 for u in uu]
old_weights[idx_u_old] = 1.0
old_weights_func = lambda old_omega: old_weights
else:
old_weights_func = lambda old_omega: idx_u_old
t = tt[-1]
gammas = [1.0 for t in initial_t]
step = 0
while t < t_final and step < maxsteps:
step += 1
if relaxation and adapt_coefficients:
u_new = adaptive_step(uu, ff, h, old_omega)
eta_est = adaptive_estimate(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega)
else:
u_new = fixed_step(uu, ff, h)
eta_est = fixed_estimate(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h)
old_weights = old_weights_func(old_omega)
u_old = sum(old_weights[idx]*uu[idx] for idx in np.arange(-len(old_weights), 0))
eta_old = sum(old_weights[idx]*old_eta[idx] for idx in np.arange(-len(old_weights), 0))
if projection:
gamma, u_new = cons_or_diss_projection_solve(eta, deta, eta_est, u_old, eta_old, u_new, method, tol, maxiter)
elif relaxation:
gamma = cons_or_diss_relaxation_solve(eta, deta, eta_est, u_old, eta_old, u_new, old_gamma[-1], method, tol, maxiter)
u_new = u_old + gamma * (u_new - u_old)
for i in np.arange(-len(old_gamma), -1):
old_gamma[i] = old_gamma[i+1]
old_gamma[-1] = gamma
else:
gamma = 1.0
if return_gamma:
gammas.append(gamma)
uu.append(u_new)
if relaxation and adapt_dt:
t_old = np.sum([old_weights[idx]*tt[idx] for idx in np.arange(-len(old_weights), 0)])
if fixed_coefficient_fix and not adapt_coefficients:
t_diff = -h * np.sum([idx*old_weights[idx] for idx in np.arange(-len(old_weights), 0)])
else:
t_diff = tt[-1] + h - t_old
t = t_old + gamma * t_diff
if adapt_coefficients:
# new_omega = -idx_u_old*gamma - np.sum([old_omega[i] for i in np.arange(-1, idx_u_old, -1)])
new_omega = (t - tt[-1]) / h
for i in np.arange(-len(old_omega), -1):
old_omega[i] = old_omega[i+1]
old_omega[-1] = new_omega
if gamma < 1.0e-14:
raise Exception("gamma = %.2e is too small in step %d!" % (gamma, step))
else:
t += h
tt.append(t)
for i in np.arange(-len(ff), -1):
ff[i] = ff[i+1]
ff[-1] = f(u_new)
for i in np.arange(-len(old_eta), -1):
old_eta[i] = old_eta[i+1]
old_eta[-1] = eta(u_new)
for i in np.arange(-len(old_deta_f), -1):
old_deta_f[i] = old_deta_f[i+1]
old_deta_f[-1] = np.dot(deta(u_new), ff[-1])
if return_gamma:
return np.array(tt), uu, np.array(gammas)
else:
return np.array(tt), uu
# explicit Adams methods
def fixed_step_AB2(uu, ff, h):
du_new = (
1.5
) * ff[-1] + (
-0.5
) * ff[-2]
u_new = uu[-1] + h * du_new
return u_new
def fixed_estimate_AB2(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
du1int = (
0.625
) * ff[-1] + (
-0.125
) * ff[-2]
u1int = uu[-1] + h * du1int
eta_est = old_eta[-1] + h * ( 1 * np.dot(deta(u1int), f(u1int)) )
return eta_est
def adaptive_step_AB2(uu, ff, h, old_omega):
om1 = old_omega[-1]
du_new = (
1 + 1/(2.*om1)
) * ff[-1] + (
-1/(2.*om1)
) * ff[-2]
u_new = uu[-1] + h * du_new
return u_new
def adaptive_estimate_AB2(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
om1 = old_omega[-1]
du1int = (
(4 + 1./om1)/8.
) * ff[-1] + (
-1/(8.*om1)
) * ff[-2]
u1int = uu[-1] + h * du1int
eta_est = old_eta[-1] + h * ( 1 * np.dot(deta(u1int), f(u1int)) )
return eta_est
def conservative_AB2(f, t_final, t0, u0, t1, u1,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1], [u0, u1],
fixed_step_AB2, adaptive_step_AB2,
**kwargs)
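# A minimal usage sketch (illustration only; problem, step size, and the
# midpoint bootstrap for the second starting value are assumptions, not part
# of the method): relax AB2 so that the energy etaL2 of the harmonic
# oscillator is conserved exactly from the second starting value on.
def _demo_conservative_AB2():
    f = lambda u: np.array([u[1], -u[0]])  # harmonic oscillator
    u0 = np.array([1.0, 0.0])
    h = 0.1
    u1 = u0 + h * f(u0 + 0.5 * h * f(u0))  # explicit midpoint bootstrap
    tt, uu = conservative_AB2(f, 10.0, 0.0, u0, h, u1,
                              relaxation=True, adapt_dt=True)
    # relaxation enforces eta relative to the previous step, so eta is
    # constant from u1 on (u0 -> u1 was computed without relaxation)
    return etaL2(uu[-1]) - etaL2(uu[1])  # ~ roundoff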
def cons_or_diss_AB2(f, t_final, t0, u0, t1, u1,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1], [u0, u1],
fixed_step_AB2, adaptive_step_AB2,
fixed_estimate_AB2, adaptive_estimate_AB2,
**kwargs)
def fixed_step_AB3(uu, ff, h):
u_new = uu[-1] + h * ( (23./12.)*ff[-1] - (16./12.)*ff[-2] + (5./12.)*ff[-3])
return u_new
def fixed_estimate_AB3(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
du1int = (
0.24639141243202012
) * ff[-1] + (
-0.04780399468440579
) * ff[-2] + (
0.012737447657572745
) * ff[-3]
u1int = uu[-1] + h * du1int
du2int = (
1.3369419209013131
) * ff[-1] + (
-0.7855293386489275
) * ff[-2] + (
0.23726255234242727
) * ff[-3]
u2int = uu[-1] + h * du2int
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def adaptive_step_AB3(uu, ff, h, old_omega):
om2 = old_omega[-2]
om1 = old_omega[-1]
du_new = (
(2 + 3*om2 + 6*om1 * (1 + om1 + om2)) / (6*om1 * (om1 + om2))
) * ff[-1] + (
-(2 + 3 * (om1 + om2)) / (6 * om1 * om2)
) * ff[-2] + (
(2 + 3*om1) / (6 * om2 * (om1 + om2))
) * ff[-3]
u_new = uu[-1] + h * du_new
return u_new
def adaptive_estimate_AB3(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
om2 = old_omega[-2]
om1 = old_omega[-1]
du1int = (
-(-9 + 5*np.sqrt(3) + 9*(-2 + np.sqrt(3))*om2 + 18*om1*(-2 + np.sqrt(3) + (-3 + np.sqrt(3))*om1 + (-3 + np.sqrt(3))*om2))/(108.*om1*(om1 + om2))
) * ff[-1] + (
(-9 + 5*np.sqrt(3) + 9*(-2 + np.sqrt(3))*om1 + 9*(-2 + np.sqrt(3))*om2)/(108.*om1*om2)
) * ff[-2] + (
(9 - 5*np.sqrt(3) - 9*(-2 + np.sqrt(3))*om1)/(108.*om2*(om1 + om2))
) * ff[-3]
u1int = uu[-1] + h * du1int
du2int = (
(9 + 5*np.sqrt(3) + 9*(2 + np.sqrt(3))*om2 + 18*om1*(2 + np.sqrt(3) + (3 + np.sqrt(3))*om1 + (3 + np.sqrt(3))*om2))/(108.*om1*(om1 + om2))
) * ff[-1] + (
-(9 + 5*np.sqrt(3) + 9*(2 + np.sqrt(3))*om1 + 9*(2 + np.sqrt(3))*om2)/(108.*om1*om2)
) * ff[-2] + (
(9 + 5*np.sqrt(3) + 9*(2 + np.sqrt(3))*om1)/(108.*om2*(om1 + om2))
) * ff[-3]
u2int = uu[-1] + h * du2int
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def conservative_AB3(f, t_final, t0, u0, t1, u1, t2, u2,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1, t2], [u0, u1, u2],
fixed_step_AB3, adaptive_step_AB3,
**kwargs)
def cons_or_diss_AB3(f, t_final, t0, u0, t1, u1, t2, u2,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1, t2], [u0, u1, u2],
fixed_step_AB3, adaptive_step_AB3,
fixed_estimate_AB3, adaptive_estimate_AB3,
**kwargs)
def fixed_step_AB4(uu, ff, h):
du_new = (
2.2916666666666665
) * ff[-1] + (
-2.4583333333333335
) * ff[-2] + (
1.5416666666666667
) * ff[-3] + (
-0.375
) * ff[-4]
u_new = uu[-1] + h * du_new
return u_new
def fixed_estimate_AB4(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
du1int = (
0.2554904416411566
) * ff[-1] + (
-0.07510108231181513
) * ff[-2] + (
0.04003453528498212
) * ff[-3] + (
-0.009099029209136444
) * ff[-4]
u1int = uu[-1] + h * du1int
du2int = (
1.5384910398403246
) * ff[-1] + (
-1.3901766954659625
) * ff[-2] + (
0.8419099091594622
) * ff[-3] + (
-0.2015491189390117
) * ff[-4]
u2int = uu[-1] + h * du2int
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def adaptive_step_AB4(uu, ff, h, old_omega):
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
du_new = (
(3 + 8*om2 + 4*om3 + 6*(2*(om1*om1*om1) + om2*(om2 + om3) + 2*om1*(1 + om2)*(1 + om2 + om3) + om1*om1*(3 + 4*om2 + 2*om3)))/(12.*om1*(om1 + om2)*(om1 + om2 + om3))
) * ff[-1] + (
-(3 + 6*(om1*om1) + 8*om2 + 4*om3 + 6*om2*(om2 + om3) + 2*om1*(4 + 6*om2 + 3*om3))/(12.*om1*om2*(om2 + om3))
) * ff[-2] + (
(3 + 4*om2 + 4*om3 + 2*om1*(4 + 3*om1 + 3*om2 + 3*om3))/(12.*om2*(om1 + om2)*om3)
) * ff[-3] + (
-(3 + 4*om2 + 2*om1*(4 + 3*om1 + 3*om2))/(12.*om3*(om2 + om3)*(om1 + om2 + om3))
) * ff[-4]
u_new = uu[-1] + h * du_new
return u_new
def adaptive_estimate_AB4(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
du1int = (
(21 - 12*np.sqrt(3) + 8*(9 - 5*np.sqrt(3))*om2 + 4*(9 - 5*np.sqrt(3))*om3 + 12*(-6*(-3 + np.sqrt(3))*(om1*om1*om1) - 3*(-2 + np.sqrt(3))*om2*(om2 + om3) + 3*(om1*om1)*(6 - 3*np.sqrt(3) - 4*(-3 + np.sqrt(3))*om2 - 2*(-3 + np.sqrt(3))*om3) + om1*(9 - 5*np.sqrt(3) - 6*(-2 + np.sqrt(3))*om3 - 6*om2*(2*(-2 + np.sqrt(3)) + (-3 + np.sqrt(3))*om2 + (-3 + np.sqrt(3))*om3))))/(432.*om1*(om1 + om2)*(om1 + om2 + om3))
) * ff[-1] + (
(3*(-7 + 4*np.sqrt(3)) + 8*(-9 + 5*np.sqrt(3))*om2 + 4*(-9 + 5*np.sqrt(3))*om3 + 4*(9*(-2 + np.sqrt(3))*(om1*om1) + 9*(-2 + np.sqrt(3))*om2*(om2 + om3) + om1*(2*(-9 + 5*np.sqrt(3)) + 18*(-2 + np.sqrt(3))*om2 + 9*(-2 + np.sqrt(3))*om3)))/(432.*om1*om2*(om2 + om3))
) * ff[-2] + (
(21 - 12*np.sqrt(3) + 4*(9 - 5*np.sqrt(3))*om2 + 4*(9 - 5*np.sqrt(3))*om3 + 4*om1*(18 - 10*np.sqrt(3) - 9*(-2 + np.sqrt(3))*om1 - 9*(-2 + np.sqrt(3))*om2 - 9*(-2 + np.sqrt(3))*om3))/(432.*om2*(om1 + om2)*om3)
) * ff[-3] + (
(3*(-7 + 4*np.sqrt(3)) + 4*(-9 + 5*np.sqrt(3))*om2 + 4*om1*(2*(-9 + 5*np.sqrt(3)) + 9*(-2 + np.sqrt(3))*om1 + 9*(-2 + np.sqrt(3))*om2))/(432.*om3*(om2 + om3)*(om1 + om2 + om3))
) * ff[-4]
u1int = uu[-1] + h * du1int
du2int = (
(3*(7 + 4*np.sqrt(3)) + 8*(9 + 5*np.sqrt(3))*om2 + 4*(9 + 5*np.sqrt(3))*om3 + 12*(6*(3 + np.sqrt(3))*(om1*om1*om1) + 3*(2 + np.sqrt(3))*om2*(om2 + om3) + 3*(om1*om1)*(3*(2 + np.sqrt(3)) + 4*(3 + np.sqrt(3))*om2 + 2*(3 + np.sqrt(3))*om3) + om1*(9 + 5*np.sqrt(3) + 6*(2 + np.sqrt(3))*om3 + 6*om2*(2*(2 + np.sqrt(3)) + (3 + np.sqrt(3))*om2 + (3 + np.sqrt(3))*om3))))/(432.*om1*(om1 + om2)*(om1 + om2 + om3))
) * ff[-1] + (
-(3*(7 + 4*np.sqrt(3)) + 8*(9 + 5*np.sqrt(3))*om2 + 4*(9 + 5*np.sqrt(3))*om3 + 4*(9*(2 + np.sqrt(3))*(om1*om1) + 9*(2 + np.sqrt(3))*om2*(om2 + om3) + om1*(2*(9 + 5*np.sqrt(3)) + 18*(2 + np.sqrt(3))*om2 + 9*(2 + np.sqrt(3))*om3)))/(432.*om1*om2*(om2 + om3))
) * ff[-2] + (
(3*(7 + 4*np.sqrt(3)) + 4*(9 + 5*np.sqrt(3))*om2 + 4*(9 + 5*np.sqrt(3))*om3 + 4*om1*(2*(9 + 5*np.sqrt(3)) + 9*(2 + np.sqrt(3))*om1 + 9*(2 + np.sqrt(3))*om2 + 9*(2 + np.sqrt(3))*om3))/(432.*om2*(om1 + om2)*om3)
) * ff[-3] + (
-(3*(7 + 4*np.sqrt(3)) + 4*(9 + 5*np.sqrt(3))*om2 + 4*om1*(2*(9 + 5*np.sqrt(3)) + 9*(2 + np.sqrt(3))*om1 + 9*(2 + np.sqrt(3))*om2))/(432.*om3*(om2 + om3)*(om1 + om2 + om3))
) * ff[-4]
u2int = uu[-1] + h * du2int
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def conservative_AB4(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_AB4, adaptive_step_AB4,
**kwargs)
def cons_or_diss_AB4(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_AB4, adaptive_step_AB4,
fixed_estimate_AB4, adaptive_estimate_AB4,
**kwargs)
def fixed_step_AB5(uu, ff, h):
du_new = (
2.640277777777778
) * ff[-1] + (
-3.852777777777778
) * ff[-2] + (
3.6333333333333333
) * ff[-3] + (
-1.7694444444444444
) * ff[-4] + (
0.3486111111111111
) * ff[-5]
u_new = uu[-1] + h * du_new
return u_new
def adaptive_step_AB5(uu, ff, h, old_omega):
om4 = old_omega[-4]
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
du_new = (
(3*(4 + 15*om2 + 10*om3 + 5*om4) + 10*(6*(om1*om1*om1*om1) + 3*(om2*om2*om2) + 4*om2*om4 + 2*om3*(om3 + om4) + 6*om1*(1 + om2)*(1 + om2 + om3)*(1 + om2 + om3 + om4) + 3*(om2*om2)*(2 + 2*om3 + om4) + 6*(om1*om1*om1)*(2 + 3*om2 + 2*om3 + om4) + om2*om3*(8 + 3*om3 + 3*om4) + 3*(om1*om1)*(4 + 6*(om2*om2) + 3*om4 + 2*om3*(3 + om3 + om4) + om2*(9 + 8*om3 + 4*om4))))/(60.*om1*(om1 + om2)*(om1 + om2 + om3)*(om1 + om2 + om3 + om4))
) * ff[-1] + (
-(30*(om1*om1*om1) + 30*(om1*om1)*(2 + 3*om2 + 2*om3 + om4) + 3*(4 + 10*om3 + 5*om4) + 5*om1*(9 + 16*om3 + 8*om4 + 6*(3*(om2*om2) + om3*(om3 + om4) + 2*om2*(2 + 2*om3 + om4))) + 5*(6*(om2*om2*om2) + 4*om3*(om3 + om4) + 6*(om2*om2)*(2 + 2*om3 + om4) + om2*(9 + 8*om4 + 2*om3*(8 + 3*om3 + 3*om4))))/(60.*om1*om2*(om2 + om3)*(om2 + om3 + om4))
) * ff[-2] + (
(3*(4 + 10*om2 + 10*om3 + 5*om4) + 5*(6*(om1*om1*om1) + 4*(om2 + om3)*(om2 + om3 + om4) + 6*(om1*om1)*(2*(1 + om2 + om3) + om4) + om1*(9 + 6*(om2*om2) + 16*om3 + 8*om4 + 6*om3*(om3 + om4) + 2*om2*(8 + 6*om3 + 3*om4))))/(60.*om2*(om1 + om2)*om3*(om3 + om4))
) * ff[-3] + (
-(3*(4 + 10*om2 + 5*om3 + 5*om4) + 5*(6*(om1*om1*om1) + 4*om2*(om2 + om3 + om4) + 6*(om1*om1)*(2 + 2*om2 + om3 + om4) + om1*(9 + 8*om3 + 8*om4 + 2*om2*(8 + 3*om2 + 3*om3 + 3*om4))))/(60.*om3*(om2 + om3)*(om1 + om2 + om3)*om4)
) * ff[-4] + (
(3*(4 + 10*om2 + 5*om3) + 5*(6*(om1*om1*om1) + 4*om2*(om2 + om3) + 6*(om1*om1)*(2 + 2*om2 + om3) + om1*(9 + 8*om3 + 2*om2*(8 + 3*om2 + 3*om3))))/(60.*om4*(om3 + om4)*(om2 + om3 + om4)*(om1 + om2 + om3 + om4))
) * ff[-5]
u_new = uu[-1] + h * du_new
return u_new
def conservative_AB5(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3, t4, u4,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1, t2, t3, t4], [u0, u1, u2, u3, u4],
fixed_step_AB5, adaptive_step_AB5,
**kwargs)
# Nyström methods based on the idea $u_{n} = u_{n-2} + \int_{t_{n-2}}^{t_{n}} f$
def fixed_step_Nyström2(uu, ff, h):
u_new = uu[-2] + 2 * h * ff[-1]
return u_new
def fixed_estimate_Nyström2(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
u1int = uu[-2] + h * (
(
1.125
) * ff[-1] + (
0.375
) * ff[-2]
)
eta_est = old_eta[-1] + h * ( 1 * np.dot(deta(u1int), f(u1int)) )
return eta_est
def adaptive_step_Nyström2(uu, ff, h, old_omega):
om1 = old_omega[-1]
u_new = uu[-2] + h * (
(
2./om1
) * ff[-1] + (
2 - 2./om1
) * ff[-2]
)
return u_new
def adaptive_estimate_Nyström2(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
om1 = old_omega[-1]
u1int = uu[-2] + h * (
(
((1 + 2*om1)*(1 + 2*om1))/(8.*om1)
) * ff[-1] + (
-1/(8.*om1) + om1/2.
) * ff[-2]
)
eta_est = old_eta[-1] + h * ( 1 * np.dot(deta(u1int), f(u1int)) )
return eta_est
def conservative_Nyström2(f, t_final, t0, u0, t1, u1,
idx_u_old=-2,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1], [u0, u1],
fixed_step_Nyström2, adaptive_step_Nyström2,
idx_u_old=idx_u_old,
**kwargs)
def cons_or_diss_Nyström2(f, t_final, t0, u0, t1, u1,
idx_u_old=-2,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1], [u0, u1],
fixed_step_Nyström2, adaptive_step_Nyström2,
fixed_estimate_Nyström2, adaptive_estimate_Nyström2,
idx_u_old=idx_u_old,
**kwargs)
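# Usage sketch for the Nyström (leapfrog) family (illustration only; the
# bootstrap step is an assumption): the calling convention matches the Adams
# wrappers, but projection/relaxation uses uu[-2] as base point
# (idx_u_old=-2), so with projection the value of eta is propagated along
# each leapfrog parity separately.
def _demo_conservative_Nystroem2():
    f = lambda u: np.array([u[1], -u[0]])
    u0 = np.array([1.0, 0.0])
    h = 0.1
    u1 = u0 + h * f(u0 + 0.5 * h * f(u0))
    tt, uu = conservative_Nyström2(f, 10.0, 0.0, u0, h, u1, projection=True)
    return etaL2(uu[-1]) - etaL2(uu[-3])  # ~ 0: same parity, same eta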
def fixed_step_Nyström3(uu, ff, h):
u_new = uu[-2] + h * (
(
2.3333333333333335
) * ff[-1] + (
-0.6666666666666666
) * ff[-2] + (
0.3333333333333333
) * ff[-3]
)
return u_new
def fixed_estimate_Nyström3(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
u1int = uu[-2] + h * (
(
0.6630580790986869
) * ff[-1] + (
0.618862671982261
) * ff[-2] + (
-0.07059588567576056
) * ff[-3]
)
u2int = uu[-2] + h * (
(
1.7536085875679797
) * ff[-1] + (
-0.11886267198226091
) * ff[-2] + (
0.15392921900909387
) * ff[-3]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def adaptive_step_Nyström3(uu, ff, h, old_omega):
om2 = old_omega[-2]
om1 = old_omega[-1]
u_new = uu[-2] + h * (
(
(8 + 6*om2)/(3*(om1*om1) + 3*om1*om2)
) * ff[-1] + (
(2*(-4 - 3*om2 + 3*om1*(1 + om2)))/(3.*om1*om2)
) * ff[-2] + (
(8 - 6*om1)/(3*om1*om2 + 3*(om2*om2))
) * ff[-3]
)
return u_new
def adaptive_estimate_Nyström3(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
om2 = old_omega[-2]
om1 = old_omega[-1]
    u1int = uu[-2] + h * (
(
(9 - 5*np.sqrt(3) - 9*(-2 + np.sqrt(3))*om2 + 18*om1*(2 - np.sqrt(3) - (-3 + np.sqrt(3))*om2 + om1*(3 - np.sqrt(3) + 2*om1 + 3*om2)))/(108.*om1*(om1 + om2))
) * ff[-1] + (
(-9 + 5*np.sqrt(3) + 9*(-2 + np.sqrt(3))*om2 + 9*om1*(-2 + np.sqrt(3) + 2*om1*(om1 + 3*om2)))/(108.*om1*om2)
) * ff[-2] + (
(9 - 5*np.sqrt(3) - 9*om1*(-2 + np.sqrt(3) + 2*(om1*om1)))/(108.*om2*(om1 + om2))
) * ff[-3]
)
u2int = uu[-2] + h * (
(
(9 + 5*np.sqrt(3) + 9*(2 + np.sqrt(3))*om2 + 18*om1*(2 + np.sqrt(3) + (3 + np.sqrt(3))*om2 + om1*(3 + np.sqrt(3) + 2*om1 + 3*om2)))/(108.*om1*(om1 + om2))
) * ff[-1] + (
-(9 + 5*np.sqrt(3) + 9*(2 + np.sqrt(3))*om2 + 9*om1*(2 + np.sqrt(3) - 2*om1*(om1 + 3*om2)))/(108.*om1*om2)
) * ff[-2] + (
(9 + 5*np.sqrt(3) + 9*om1*(2 + np.sqrt(3) - 2*(om1*om1)))/(108.*om2*(om1 + om2))
) * ff[-3]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def conservative_Nyström3(f, t_final, t0, u0, t1, u1, t2, u2,
idx_u_old=-2,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1, t2], [u0, u1, u2],
fixed_step_Nyström3, adaptive_step_Nyström3,
idx_u_old=idx_u_old,
**kwargs)
def cons_or_diss_Nyström3(f, t_final, t0, u0, t1, u1, t2, u2,
idx_u_old=-2,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1, t2], [u0, u1, u2],
fixed_step_Nyström3, adaptive_step_Nyström3,
fixed_estimate_Nyström3, adaptive_estimate_Nyström3,
idx_u_old=idx_u_old,
**kwargs)
def fixed_step_Nyström4(uu, ff, h):
u_new = uu[-2] + h * (
(
2.6666666666666665
) * ff[-1] + (
-1.6666666666666667
) * ff[-2] + (
1.3333333333333333
) * ff[-3] + (
-0.3333333333333333
) * ff[-4]
)
return u_new
def fixed_estimate_Nyström4(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
u1int = uu[-2] + h * (
(
0.6304904416411566
) * ff[-1] + (
0.7165655843548515
) * ff[-2] + (
-0.1682987980483512
) * ff[-3] + (
0.03256763745753022
) * ff[-4]
)
u2int = uu[-2] + h * (
(
1.9134910398403246
) * ff[-1] + (
-0.5985100287992959
) * ff[-2] + (
0.6335765758261289
) * ff[-3] + (
-0.15988245227234502
) * ff[-4]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def adaptive_step_Nyström4(uu, ff, h, old_omega):
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
u_new = uu[-2] + h * (
(
(2*(6 + 4*om3 + om2*(8 + 3*om2 + 3*om3)))/(3.*om1*(om1 + om2)*(om1 + om2 + om3))
) * ff[-1] + (
(2*(-6 - 4*om3 - om2*(8 + 3*om2 + 3*om3) + om1*(4 + 3*om3 + 3*om2*(2 + om2 + om3))))/(3.*om1*om2*(om2 + om3))
) * ff[-2] + (
(2*(6 + 4*om2 + 4*om3 - om1*(4 + 3*om2 + 3*om3)))/(3.*om2*(om1 + om2)*om3)
) * ff[-3] + (
(2*(-6 - 4*om2 + om1*(4 + 3*om2)))/(3.*om3*(om2 + om3)*(om1 + om2 + om3))
) * ff[-4]
)
return u_new
def adaptive_estimate_Nyström4(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
u1int = uu[-2] + h * (
(
(21 - 12*np.sqrt(3) + 8*(9 - 5*np.sqrt(3))*om2 + 4*(9 - 5*np.sqrt(3))*om3 + 12*(9*(om1*om1*om1*om1) - 3*(-2 + np.sqrt(3))*om2*(om2 + om3) + 6*(om1*om1*om1)*(3 - np.sqrt(3) + 4*om2 + 2*om3) + 3*(om1*om1)*(6 - 3*np.sqrt(3) - 2*(-3 + np.sqrt(3))*om3 + 2*om2*(6 - 2*np.sqrt(3) + 3*om2 + 3*om3)) + om1*(9 - 5*np.sqrt(3) - 6*(-2 + np.sqrt(3))*om3 - 6*om2*(2*(-2 + np.sqrt(3)) + (-3 + np.sqrt(3))*om2 + (-3 + np.sqrt(3))*om3))))/(432.*om1*(om1 + om2)*(om1 + om2 + om3))
) * ff[-1] + (
(3*(-7 + 4*np.sqrt(3)) + 8*(-9 + 5*np.sqrt(3))*om2 + 4*(-9 + 5*np.sqrt(3))*om3 + 4*(9*(om1*om1*om1*om1) + 9*(-2 + np.sqrt(3))*om2*(om2 + om3) + 18*(om1*om1*om1)*(2*om2 + om3) + om1*(2*(-9 + 5*np.sqrt(3)) + 18*(-2 + np.sqrt(3))*om2 + 9*(-2 + np.sqrt(3))*om3) + 9*(om1*om1)*(-2 + np.sqrt(3) + 6*om2*(om2 + om3))))/(432.*om1*om2*(om2 + om3))
) * ff[-2] + (
-(3*(-7 + 4*np.sqrt(3)) + 4*(-9 + 5*np.sqrt(3))*om2 + 4*(-9 + 5*np.sqrt(3))*om3 + 4*om1*(2*(-9 + 5*np.sqrt(3)) + 9*(-2 + np.sqrt(3))*om2 + 9*(-2 + np.sqrt(3))*om3 + 9*om1*(-2 + np.sqrt(3) + om1*om1 + 2*om1*(om2 + om3))))/(432.*om2*(om1 + om2)*om3)
) * ff[-3] + (
(3*(-7 + 4*np.sqrt(3)) + 4*(-9 + 5*np.sqrt(3))*om2 + 4*om1*(2*(-9 + 5*np.sqrt(3)) + 9*(-2 + np.sqrt(3))*om2 + 9*om1*(-2 + np.sqrt(3) + om1*om1 + 2*om1*om2)))/(432.*om3*(om2 + om3)*(om1 + om2 + om3))
) * ff[-4]
)
u2int = uu[-2] + h * (
(
(3*(7 + 4*np.sqrt(3)) + 8*(9 + 5*np.sqrt(3))*om2 + 4*(9 + 5*np.sqrt(3))*om3 + 12*(9*(om1*om1*om1*om1) + 3*(2 + np.sqrt(3))*om2*(om2 + om3) + 6*(om1*om1*om1)*(3 + np.sqrt(3) + 4*om2 + 2*om3) + 3*(om1*om1)*(3*(2 + np.sqrt(3)) + 2*(3 + np.sqrt(3))*om3 + 2*om2*(6 + 2*np.sqrt(3) + 3*om2 + 3*om3)) + om1*(9 + 5*np.sqrt(3) + 6*(2 + np.sqrt(3))*om3 + 6*om2*(2*(2 + np.sqrt(3)) + (3 + np.sqrt(3))*om2 + (3 + np.sqrt(3))*om3))))/(432.*om1*(om1 + om2)*(om1 + om2 + om3))
) * ff[-1] + (
-(3*(7 + 4*np.sqrt(3)) + 8*(9 + 5*np.sqrt(3))*om2 + 4*(9 + 5*np.sqrt(3))*om3 + 4*(-9*(om1*om1*om1*om1) + 9*(2 + np.sqrt(3))*om2*(om2 + om3) - 18*(om1*om1*om1)*(2*om2 + om3) + om1*(2*(9 + 5*np.sqrt(3)) + 18*(2 + np.sqrt(3))*om2 + 9*(2 + np.sqrt(3))*om3) + 9*(om1*om1)*(2 + np.sqrt(3) - 6*om2*(om2 + om3))))/(432.*om1*om2*(om2 + om3))
) * ff[-2] + (
(3*(7 + 4*np.sqrt(3)) + 4*(9 + 5*np.sqrt(3))*om2 + 4*(9 + 5*np.sqrt(3))*om3 + 4*om1*(2*(9 + 5*np.sqrt(3)) + 9*(2 + np.sqrt(3))*om2 + 9*(2 + np.sqrt(3))*om3 + 9*om1*(2 + np.sqrt(3) - om1*(om1 + 2*(om2 + om3)))))/(432.*om2*(om1 + om2)*om3)
) * ff[-3] + (
-(3*(7 + 4*np.sqrt(3)) + 4*(9 + 5*np.sqrt(3))*om2 + 4*om1*(2*(9 + 5*np.sqrt(3)) + 9*(2 + np.sqrt(3))*om2 + 9*om1*(2 + np.sqrt(3) - om1*(om1 + 2*om2))))/(432.*om3*(om2 + om3)*(om1 + om2 + om3))
) * ff[-4]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def conservative_Nyström4(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
idx_u_old=-2,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_Nyström4, adaptive_step_Nyström4,
idx_u_old=idx_u_old,
**kwargs)
def cons_or_diss_Nyström4(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
idx_u_old=-2,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_Nyström4, adaptive_step_Nyström4,
fixed_estimate_Nyström4, adaptive_estimate_Nyström4,
idx_u_old=idx_u_old,
**kwargs)
#NOTE: This method does not work well with relaxation
def conservative_Nyström2mod(f, t_final, t0, u0, t1, u1,
eta=etaL2, deta=detaL2,
return_gamma=False,
projection=False, relaxation=False,
adapt_dt=False, adapt_coefficients=False,
method=None, tol=1.e-14, maxiter=10000, maxsteps=10**12):
uu = [u0, u1]
ff = [f(u0), f(u1)]
tt = [t0, t1]
old_omega = [1.0, 1.0]
old_gamma = [1.0, 1.0]
h = t1 - t0
if relaxation and projection:
raise Exception("Use either relaxation or projection, not both.")
    if relaxation and method is None:
        method = "brentq"
    elif projection and method is None:
        method = "simplified Newton"
t = t1
gammas = [1.0, 1.0]
step = 0
while t < t_final and step < maxsteps:
step += 1
if relaxation and adapt_coefficients:
om1 = old_omega[-1]
du_new = (2.0) / (om1) * ff[-1]
du_new = du_new + 2*(om1 - 1) / (om1) * ff[-2]
u_new = uu[-2] + h * du_new
else:
u_new = uu[-2] + 2 * h * ff[-1]
u_old = uu[-1]
eta_old = eta(u_old)
if projection:
gamma, u_new = conservative_projection_solve(eta, deta, u_old, eta_old, u_new, method, tol, maxiter)
elif relaxation:
gamma = conservative_relaxation_solve(eta, deta, u_old, eta_old, u_new, old_gamma[-1], method, tol, maxiter)
u_new = u_old + gamma * (u_new - u_old)
old_gamma[-2] = old_gamma[-1]
old_gamma[-1] = gamma
else:
gamma = 1.0
if return_gamma:
gammas.append(gamma)
uu.append(u_new)
if relaxation and adapt_dt:
t = tt[-1] + gamma * h
old_omega[-2] = old_omega[-1]
old_omega[-1] = gamma
if gamma < 1.0e-14:
raise Exception("gamma = %.2e is too small in step %d!" % (gamma, step))
else:
t = tt[-1] + h
tt.append(t)
ff[-2] = ff[-1]
ff[-1] = f(u_new)
if return_gamma:
return np.array(tt), uu, np.array(gammas)
else:
return np.array(tt), uu
#NOTE: This method does not work well with relaxation
def conservative_Nyström3mod(f, t_final, t0, u0, t1, u1, t2, u2,
eta=etaL2, deta=detaL2,
return_gamma=False,
projection=False, relaxation=False,
adapt_dt=False, adapt_coefficients=False,
method=None, tol=1.e-14, maxiter=10000, maxsteps=10**12):
uu = [u0, u1, u2]
ff = [f(u0), f(u1), f(u2)]
tt = [t0, t1, t2]
old_omega = [1.0, 1.0, 1.0]
old_gamma = [1.0, 1.0, 1.0]
h = t1 - t0
np.testing.assert_approx_equal(h, t2 - t1)
if relaxation and projection:
raise Exception("Use either relaxation or projection, not both.")
    if relaxation and method is None:
        method = "brentq"
    elif projection and method is None:
        method = "simplified Newton"
t = t2
gammas = [1.0, 1.0, 1.0]
step = 0
while t < t_final and step < maxsteps:
step += 1
if relaxation and adapt_coefficients:
om2 = old_omega[-2]
om1 = old_omega[-1]
du_new = 2 * (4 + 3*om2) / (3 * om1 * (om1 + om2)) * ff[-1]
du_new = du_new - 2 * (4 + 3*om2 - 3*om1 * (1 + om2)) / (3 * om1 * om2) * ff[-2]
du_new = du_new + (8 - 6*om1) / (3 * om2 * (om1 + om2)) * ff[-3]
u_new = uu[-2] + h * du_new
else:
u_new = uu[-2] + h * ((7.0/3.0) * ff[-1] - (2.0/3.0) * ff[-2] + (1.0/3.0) * ff[-3])
u_old = uu[-1]
eta_old = eta(u_old)
if projection:
gamma, u_new = conservative_projection_solve(eta, deta, u_old, eta_old, u_new, method, tol, maxiter)
elif relaxation:
gamma = conservative_relaxation_solve(eta, deta, u_old, eta_old, u_new, old_gamma[-1], method, tol, maxiter)
u_new = u_old + gamma * (u_new - u_old)
old_gamma[-3] = old_gamma[-2]
old_gamma[-2] = old_gamma[-1]
old_gamma[-1] = gamma
else:
gamma = 1.0
if return_gamma:
gammas.append(gamma)
uu.append(u_new)
if relaxation and adapt_dt:
t = tt[-1] + gamma * h
old_omega[-3] = old_omega[-2]
old_omega[-2] = old_omega[-1]
old_omega[-1] = gamma
if gamma < 1.0e-14:
raise Exception("gamma = %.2e is too small in step %d!" % (gamma, step))
else:
t = tt[-1] + h
tt.append(t)
ff[-3] = ff[-2]
ff[-2] = ff[-1]
ff[-1] = f(u_new)
if return_gamma:
return np.array(tt), uu, np.array(gammas)
else:
return np.array(tt), uu
#NOTE: This method does not work well with relaxation
def conservative_Nyström4mod(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
eta=etaL2, deta=detaL2,
return_gamma=False,
projection=False, relaxation=False,
adapt_dt=False, adapt_coefficients=False,
method=None, tol=1.e-14, maxiter=10000, maxsteps=10**12):
uu = [u0, u1, u2, u3]
ff = [f(u0), f(u1), f(u2), f(u3)]
tt = [t0, t1, t2, t3]
old_omega = [1.0, 1.0, 1.0, 1.0]
old_gamma = [1.0, 1.0, 1.0, 1.0]
h = t1 - t0
np.testing.assert_approx_equal(h, t2 - t1)
np.testing.assert_approx_equal(h, t3 - t2)
if relaxation and projection:
raise Exception("Use either relaxation or projection, not both.")
    if relaxation and method is None:
        method = "brentq"
    elif projection and method is None:
        method = "simplified Newton"
t = t3
gammas = [1.0, 1.0, 1.0, 1.0]
step = 0
while t < t_final and step < maxsteps:
step += 1
if relaxation and adapt_coefficients:
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
du_new = 2 * (6 + 4*om3 + om2 * (8 + 3*om2 + 3*om3)) / (3*om1 * (om1 + om2) * (om1 + om2 + om3)) * ff[-1]
du_new = du_new - 2 * (6 + 4*om3 + om2 * (8 + 3*om2 + 3*om3) - om1 * (4 + 3*om3 + 3*om2 * (2 + om2 + om3))) / (3 * om1 * om2 * (om2 + om3)) * ff[-2]
du_new = du_new + 2 * (6 + 4*om2 + 4*om3 - om1 * (4 + 3*om2 + 3*om3)) / (3 * om2 * (om1 + om2) * om3) * ff[-3]
du_new = du_new - 2 * (6 + 4*om2 - om1 * (4 + 3*om2)) / (3 * om3 * (om2 + om3) * (om1 + om2 + om3)) * ff[-4]
u_new = uu[-2] + h * du_new
else:
u_new = uu[-2] + h * ((8.0/3.0) * ff[-1] - (5.0/3.0) * ff[-2] + (4.0/3.0) * ff[-3] - (1.0/3.0) * ff[-4])
u_old = uu[-1]
eta_old = eta(u_old)
if projection:
gamma, u_new = conservative_projection_solve(eta, deta, u_old, eta_old, u_new, method, tol, maxiter)
elif relaxation:
gamma = conservative_relaxation_solve(eta, deta, u_old, eta_old, u_new, old_gamma[-1], method, tol, maxiter)
u_new = u_old + gamma * (u_new - u_old)
old_gamma[-4] = old_gamma[-3]
old_gamma[-3] = old_gamma[-2]
old_gamma[-2] = old_gamma[-1]
old_gamma[-1] = gamma
else:
gamma = 1.0
if return_gamma:
gammas.append(gamma)
uu.append(u_new)
if relaxation and adapt_dt:
t = tt[-1] + gamma * h
old_omega[-4] = old_omega[-3]
old_omega[-3] = old_omega[-2]
old_omega[-2] = old_omega[-1]
old_omega[-1] = gamma
if gamma < 1.0e-14:
raise Exception("gamma = %.2e is too small in step %d!" % (gamma, step))
else:
t = tt[-1] + h
tt.append(t)
ff[-4] = ff[-3]
ff[-3] = ff[-2]
ff[-2] = ff[-1]
ff[-1] = f(u_new)
if return_gamma:
return np.array(tt), uu, np.array(gammas)
else:
return np.array(tt), uu
# Nyström methods with extension to variable stepsizes by Arévalo & Söderlind (2017)
def fixed_step_Nyström2AS(uu, ff, h):
u_new = (
0.
) * uu[-1] + (
1.
) * uu[-2] + h * (
(
2.
) * ff[-1]
)
return u_new
def fixed_estimate_Nyström2AS(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
u1int = (
0.75
) * uu[-1] + (
0.25
) * uu[-2] + h * (
(
0.75
) * ff[-1]
)
eta_est = old_eta[-1] + h * ( 1 * np.dot(deta(u1int), f(u1int)) )
return eta_est
def adaptive_step_Nyström2AS(uu, ff, h, old_omega):
om1 = old_omega[-1]
u_new = (
1 - 1./(om1*om1)
) * uu[-1] + (
1./(om1*om1)
) * uu[-2] + h * (
(
1 + 1./om1
) * ff[-1]
)
return u_new
def adaptive_estimate_Nyström2AS(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
om1 = old_omega[-1]
u1int = (
1 - 1/(4.*(om1*om1))
) * uu[-1] + (
1/(4.*(om1*om1))
) * uu[-2] + h * (
(
(2 + 1/om1)/4.
) * ff[-1]
)
eta_est = old_eta[-1] + h * ( 1 * np.dot(deta(u1int), f(u1int)) )
return eta_est
def conservative_Nyström2AS(f, t_final, t0, u0, t1, u1,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1], [u0, u1],
fixed_step_Nyström2AS, adaptive_step_Nyström2AS,
**kwargs)
def cons_or_diss_Nyström2AS(f, t_final, t0, u0, t1, u1,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1], [u0, u1],
fixed_step_Nyström2AS, adaptive_step_Nyström2AS,
fixed_estimate_Nyström2AS, adaptive_estimate_Nyström2AS,
**kwargs)
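# Sketch (illustration only): the cons_or_diss_* drivers track an *estimated*
# change of eta instead of exact conservation, so dissipated functionals are
# handled as well. Here etaL2 decays along u' = -u, and return_gamma=True
# exposes the relaxation factors, which should stay close to 1 for a smooth,
# well-resolved problem.
def _demo_cons_or_diss_Nystroem2AS():
    f = lambda u: -u
    u0 = np.array([1.0, 0.5])
    h = 0.05
    u1 = u0 + h * f(u0 + 0.5 * h * f(u0))  # illustrative bootstrap step
    tt, uu, gammas = cons_or_diss_Nyström2AS(f, 2.0, 0.0, u0, h, u1,
                                             relaxation=True, adapt_dt=True,
                                             return_gamma=True)
    return np.max(np.abs(gammas - 1.0))  # small for this smooth problem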
def fixed_step_Nyström3AS(uu, ff, h):
u_new = (
0.
) * uu[-1] + (
1.
) * uu[-2] + h * (
(
2.3333333333333335
) * ff[-1] + (
-0.6666666666666666
) * ff[-2] + (
0.3333333333333333
) * ff[-3]
)
return u_new
def fixed_estimate_Nyström3AS(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
u1int = (
0.9641470039866955
) * uu[-1] + (
0.03585299601330434
) * uu[-2] + h * (
(
0.26133016077089677
) * ff[-1] + (
-0.023901997342202896
) * ff[-2] + (
0.009749697989797416
) * ff[-3]
)
u2int = (
0.41085299601330433
) * uu[-1] + (
0.5891470039866956
) * uu[-2] + h * (
(
1.5824198392291033
) * ff[-1] + (
-0.39276466932446374
) * ff[-2] + (
0.18816696867686925
) * ff[-3]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def adaptive_step_Nyström3AS(uu, ff, h, old_omega):
om2 = old_omega[-2]
om1 = old_omega[-1]
u_new = (
(-2 - 3*om2 + om1*(-3 + om1*om1 + 7*om1*om2))/(om1*om1*(om1 + 7*om2))
) * uu[-1] + (
(2 + 3*om1 + 3*om2)/(om1*om1*(om1 + 7*om2))
) * uu[-2] + h * (
(
(3*om1*((1 + om1)*(1 + om1)) + 2*(5 + 3*om1*(5 + 4*om1))*om2 + 3*(5 + 7*om1)*(om2*om2))/(3.*om1*(om1 + om2)*(om1 + 7*om2))
) * ff[-1] + (
-((4 + 6*om1 + 6*om2)/(3*(om1*om1) + 21*om1*om2))
) * ff[-2] + (
(7 + 9*om1)/(3.*(om1 + om2)*(om1 + 7*om2))
) * ff[-3]
)
return u_new
def adaptive_estimate_Nyström3AS(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
om2 = old_omega[-2]
om1 = old_omega[-1]
u1int = (
(-9 + 5*np.sqrt(3) + 9*(-2 + np.sqrt(3))*om2 + 9*om1*(-2 + np.sqrt(3) + 2*om1*(om1 + 7*om2)))/(18.*(om1*om1)*(om1 + 7*om2))
) * uu[-1] + (
-(-9 + 5*np.sqrt(3) + 9*(-2 + np.sqrt(3))*om1 + 9*(-2 + np.sqrt(3))*om2)/(18.*(om1*om1)*(om1 + 7*om2))
) * uu[-2] + h * (
(
-(18*(-3 + np.sqrt(3))*(om1*om1*om1) + 36*(om1*om1)*(-2 + np.sqrt(3) + 4*(-3 + np.sqrt(3))*om2) + 10*om2*(-9 + 5*np.sqrt(3) + 9*(-2 + np.sqrt(3))*om2) + 3*om1*(-9 + 5*np.sqrt(3) + 60*(-2 + np.sqrt(3))*om2 + 42*(-3 + np.sqrt(3))*(om2*om2)))/(108.*om1*(om1 + om2)*(om1 + 7*om2))
) * ff[-1] + (
(-9 + 5*np.sqrt(3) + 9*(-2 + np.sqrt(3))*om1 + 9*(-2 + np.sqrt(3))*om2)/(27.*om1*(om1 + 7*om2))
) * ff[-2] + (
(63 - 35*np.sqrt(3) - 54*(-2 + np.sqrt(3))*om1)/(108.*(om1 + om2)*(om1 + 7*om2))
) * ff[-3]
)
u2int = (
-(9 + 5*np.sqrt(3) + 9*(2 + np.sqrt(3))*om2 + 9*om1*(2 + np.sqrt(3) - 2*om1*(om1 + 7*om2)))/(18.*(om1*om1)*(om1 + 7*om2))
) * uu[-1] + (
(9 + 5*np.sqrt(3) + 9*(2 + np.sqrt(3))*om1 + 9*(2 + np.sqrt(3))*om2)/(18.*(om1*om1)*(om1 + 7*om2))
) * uu[-2] + h * (
(
(18*(3 + np.sqrt(3))*(om1*om1*om1) + 10*om2*(9 + 5*np.sqrt(3) + 9*(2 + np.sqrt(3))*om2) + 36*(om1*om1)*(2 + np.sqrt(3) + 4*(3 + np.sqrt(3))*om2) + 3*om1*(9 + 5*np.sqrt(3) + 60*(2 + np.sqrt(3))*om2 + 42*(3 + np.sqrt(3))*(om2*om2)))/(108.*om1*(om1 + om2)*(om1 + 7*om2))
) * ff[-1] + (
-(9 + 5*np.sqrt(3) + 9*(2 + np.sqrt(3))*om1 + 9*(2 + np.sqrt(3))*om2)/(27.*om1*(om1 + 7*om2))
) * ff[-2] + (
(7*(9 + 5*np.sqrt(3)) + 54*(2 + np.sqrt(3))*om1)/(108.*(om1 + om2)*(om1 + 7*om2))
) * ff[-3]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def conservative_Nyström3AS(f, t_final, t0, u0, t1, u1, t2, u2,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1, t2], [u0, u1, u2],
fixed_step_Nyström3AS, adaptive_step_Nyström3AS,
**kwargs)
def cons_or_diss_Nyström3AS(f, t_final, t0, u0, t1, u1, t2, u2,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1, t2], [u0, u1, u2],
fixed_step_Nyström3AS, adaptive_step_Nyström3AS,
fixed_estimate_Nyström3AS, adaptive_estimate_Nyström3AS,
**kwargs)
def fixed_step_Nyström4AS(uu, ff, h):
u_new = (
0.
) * uu[-1] + (
1.
) * uu[-2] + h * (
(
2.6666666666666665
) * ff[-1] + (
-1.6666666666666667
) * ff[-2] + (
1.3333333333333333
) * ff[-3] + (
-0.3333333333333333
) * ff[-4]
)
return u_new
def fixed_estimate_Nyström4AS(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
u1int = (
0.9694504071951937
) * uu[-1] + (
0.030549592804806153
) * uu[-2] + h * (
(
0.26694653894295894
) * ff[-1] + (
-0.050915988008010254
) * ff[-2] + (
0.03367003678398088
) * ff[-3] + (
-0.007826129508936195
) * ff[-4]
)
u2int = (
0.4345043950646931
) * uu[-1] + (
0.5654956049353068
) * uu[-2] + h * (
(
1.7505518916910652
) * ff[-1] + (
-0.9424926748921779
) * ff[-2] + (
0.7240983247979401
) * ff[-3] + (
-0.17798680206670725
) * ff[-4]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def adaptive_step_Nyström4AS(uu, ff, h, old_omega):
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
u_new = (
(-3 + om1*om1*om1*om1 - 8*om2 - 4*om3 - 6*om2*(om2 + om3) + 2*(om1*om1*om1)*(2*om2 + om3) - 2*om1*(4 + 6*om2 + 3*om3) + om1*om1*(-6 + 26*om2*(om2 + om3)))/(om1*om1*(om1*om1 + 26*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * uu[-1] + (
(3 + 6*(om1*om1) + 8*om2 + 4*om3 + 6*om2*(om2 + om3) + 2*om1*(4 + 6*om2 + 3*om3))/(om1*om1*(om1*om1 + 26*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * uu[-2] + h * (
(
(2*(3*(om1*om1)*((1 + om1)*(1 + om1)*(1 + om1)) + 9*om1*((1 + om1)*(1 + om1))*(1 + 2*om1)*om2 + 3*(8 + om1*(38 + 5*om1*(12 + 7*om1)))*(om2*om2) + 8*(8 + 3*om1*(8 + 7*om1))*(om2*om2*om2) + 6*(8 + 13*om1)*(om2*om2*om2*om2)) + 3*(1 + 2*om1 + 2*om2)*(3*om1*((1 + om1)*(1 + om1)) + 2*(8 + om1*(19 + 16*om1))*om2 + 4*(8 + 13*om1)*(om2*om2))*om3 + 4*(3*om1*((1 + om1)*(1 + om1)) + 2*(8 + 3*om1*(8 + 7*om1))*om2 + 3*(8 + 13*om1)*(om2*om2))*(om3*om3))/(6.*om1*(om1 + om2)*(om1 + om2 + om3)*(om1*om1 + 26*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-1] + (
(-5*(3 + 6*(om1*om1) + 8*om2 + 4*om3 + 6*om2*(om2 + om3) + 2*om1*(4 + 6*om2 + 3*om3)))/(3.*om1*(om1*om1 + 26*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-2] + (
(3*(om1*om1*om1) + 13*(om2 + om3)*(3 + 4*om2 + 4*om3) + om1*om1*(6 + 75*om2 + 75*om3) + om1*(3 + 72*(om2*om2) + 8*om3*(13 + 9*om3) + 8*om2*(13 + 18*om3)))/(6.*(om1 + om2)*om3*(om1*om1 + 26*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-3] + (
-(3*om1*((1 + om1)*(1 + om1)) + (39 + om1*(104 + 75*om1))*om2 + 4*(13 + 18*om1)*(om2*om2))/(6.*om3*(om1 + om2 + om3)*(om1*om1 + 26*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-4]
)
return u_new
def adaptive_estimate_Nyström4AS(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
u1int = (
(3*(-7 + 4*np.sqrt(3)) + 8*(-9 + 5*np.sqrt(3))*om2 + 4*(-9 + 5*np.sqrt(3))*om3 + 4*(9*(om1*om1*om1*om1) + 9*(-2 + np.sqrt(3))*om2*(om2 + om3) + 18*(om1*om1*om1)*(2*om2 + om3) + om1*(2*(-9 + 5*np.sqrt(3)) + 18*(-2 + np.sqrt(3))*om2 + 9*(-2 + np.sqrt(3))*om3) + 9*(om1*om1)*(-2 + np.sqrt(3) + 26*om2*(om2 + om3))))/(36.*(om1*om1)*(om1*om1 + 26*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * uu[-1] + (
(21 - 12*np.sqrt(3) + 8*(9 - 5*np.sqrt(3))*om2 + 4*(9 - 5*np.sqrt(3))*om3 + 4*(-9*(-2 + np.sqrt(3))*(om1*om1) - 9*(-2 + np.sqrt(3))*om2*(om2 + om3) + om1*(18 - 10*np.sqrt(3) - 18*(-2 + np.sqrt(3))*om2 - 9*(-2 + np.sqrt(3))*om3)))/(36.*(om1*om1)*(om1*om1 + 26*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * uu[-2] + h * (
(
-(36*(-3 + np.sqrt(3))*(om1*om1*om1*om1*om1) + 108*(om1*om1*om1*om1)*(-2 + np.sqrt(3) + 2*(-3 + np.sqrt(3))*om2 + (-3 + np.sqrt(3))*om3) + 18*(om1*om1*om1)*(-9 + 5*np.sqrt(3) + 30*(-2 + np.sqrt(3))*om2 + 70*(-3 + np.sqrt(3))*(om2*om2) + 15*(-2 + np.sqrt(3))*om3 + 70*(-3 + np.sqrt(3))*om2*om3 + 4*(-3 + np.sqrt(3))*(om3*om3)) + 16*om2*(om2 + om3)*(3*(-7 + 4*np.sqrt(3)) + 4*(-9 + 5*np.sqrt(3))*om3 + 4*om2*(2*(-9 + 5*np.sqrt(3)) + 9*(-2 + np.sqrt(3))*om2 + 9*(-2 + np.sqrt(3))*om3)) + 6*(om1*om1)*(-7 + 4*np.sqrt(3) + 336*(-3 + np.sqrt(3))*(om2*om2*om2) + 72*(om2*om2)*(5*(-2 + np.sqrt(3)) + 7*(-3 + np.sqrt(3))*om3) + 6*om3*(-9 + 5*np.sqrt(3) + 4*(-2 + np.sqrt(3))*om3) + 12*om2*(-9 + 5*np.sqrt(3) + 30*(-2 + np.sqrt(3))*om3 + 14*(-3 + np.sqrt(3))*(om3*om3))) - 3*om1*(-312*(-3 + np.sqrt(3))*(om2*om2*om2*om2) + om3*(21 - 12*np.sqrt(3) + 4*(9 - 5*np.sqrt(3))*om3) - 48*(om2*om2*om2)*(16*(-2 + np.sqrt(3)) + 13*(-3 + np.sqrt(3))*om3) - 4*(om2*om2)*(19*(-9 + 5*np.sqrt(3)) + 288*(-2 + np.sqrt(3))*om3 + 78*(-3 + np.sqrt(3))*(om3*om3)) + om2*(42 - 24*np.sqrt(3) + 4*om3*(171 - 95*np.sqrt(3) - 96*(-2 + np.sqrt(3))*om3))))/(216.*om1*(om1 + om2)*(om1 + om2 + om3)*(om1*om1 + 26*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-1] + (
(5*(3*(-7 + 4*np.sqrt(3)) + 8*(-9 + 5*np.sqrt(3))*om2 + 4*(-9 + 5*np.sqrt(3))*om3 + 4*(9*(-2 + np.sqrt(3))*(om1*om1) + 9*(-2 + np.sqrt(3))*om2*(om2 + om3) + om1*(2*(-9 + 5*np.sqrt(3)) + 18*(-2 + np.sqrt(3))*om2 + 9*(-2 + np.sqrt(3))*om3))))/(108.*om1*(om1*om1 + 26*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-2] + (
(-18*(-2 + np.sqrt(3))*(om1*om1*om1) + 39*(7 - 4*np.sqrt(3))*(om2 + om3) - 52*(-9 + 5*np.sqrt(3))*((om2 + om3)*(om2 + om3)) - 6*(om1*om1)*(-9 + 5*np.sqrt(3) + 75*(-2 + np.sqrt(3))*om2 + 75*(-2 + np.sqrt(3))*om3) + om1*(21 - 12*np.sqrt(3) - 432*(-2 + np.sqrt(3))*(om2*om2) + 8*om2*(117 - 65*np.sqrt(3) - 108*(-2 + np.sqrt(3))*om3) + 8*om3*(117 - 65*np.sqrt(3) - 54*(-2 + np.sqrt(3))*om3)))/(216.*(om1 + om2)*om3*(om1*om1 + 26*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-3] + (
(18*(-2 + np.sqrt(3))*(om1*om1*om1) + 6*(om1*om1)*(-9 + 5*np.sqrt(3) + 75*(-2 + np.sqrt(3))*om2) + 13*om2*(3*(-7 + 4*np.sqrt(3)) + 4*(-9 + 5*np.sqrt(3))*om2) + om1*(3*(-7 + 4*np.sqrt(3)) + 8*om2*(13*(-9 + 5*np.sqrt(3)) + 54*(-2 + np.sqrt(3))*om2)))/(216.*om3*(om1 + om2 + om3)*(om1*om1 + 26*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-4]
)
u2int = (
-(3*(7 + 4*np.sqrt(3)) + 8*(9 + 5*np.sqrt(3))*om2 + 4*(9 + 5*np.sqrt(3))*om3 + 4*(-9*(om1*om1*om1*om1) + 9*(2 + np.sqrt(3))*om2*(om2 + om3) - 18*(om1*om1*om1)*(2*om2 + om3) + om1*(2*(9 + 5*np.sqrt(3)) + 18*(2 + np.sqrt(3))*om2 + 9*(2 + np.sqrt(3))*om3) + 9*(om1*om1)*(2 + np.sqrt(3) - 26*om2*(om2 + om3))))/(36.*(om1*om1)*(om1*om1 + 26*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * uu[-1] + (
(3*(7 + 4*np.sqrt(3)) + 8*(9 + 5*np.sqrt(3))*om2 + 4*(9 + 5*np.sqrt(3))*om3 + 4*(9*(2 + np.sqrt(3))*(om1*om1) + 9*(2 + np.sqrt(3))*om2*(om2 + om3) + om1*(2*(9 + 5*np.sqrt(3)) + 18*(2 + np.sqrt(3))*om2 + 9*(2 + np.sqrt(3))*om3)))/(36.*(om1*om1)*(om1*om1 + 26*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * uu[-2] + h * (
(
(36*(3 + np.sqrt(3))*(om1*om1*om1*om1*om1) + 108*(om1*om1*om1*om1)*(2 + np.sqrt(3) + 2*(3 + np.sqrt(3))*om2 + (3 + np.sqrt(3))*om3) + 18*(om1*om1*om1)*(9 + 5*np.sqrt(3) + 30*(2 + np.sqrt(3))*om2 + 70*(3 + np.sqrt(3))*(om2*om2) + 15*(2 + np.sqrt(3))*om3 + 70*(3 + np.sqrt(3))*om2*om3 + 4*(3 + np.sqrt(3))*(om3*om3)) + 16*om2*(om2 + om3)*(3*(7 + 4*np.sqrt(3)) + 4*(9 + 5*np.sqrt(3))*om3 + 4*om2*(2*(9 + 5*np.sqrt(3)) + 9*(2 + np.sqrt(3))*om2 + 9*(2 + np.sqrt(3))*om3)) + 6*(om1*om1)*(7 + 4*np.sqrt(3) + 336*(3 + np.sqrt(3))*(om2*om2*om2) + 6*om3*(9 + 5*np.sqrt(3) + 4*(2 + np.sqrt(3))*om3) + 72*(om2*om2)*(5*(2 + np.sqrt(3)) + 7*(3 + np.sqrt(3))*om3) + 12*om2*(9 + 5*np.sqrt(3) + 30*(2 + np.sqrt(3))*om3 + 14*(3 + np.sqrt(3))*(om3*om3))) + 3*om1*(312*(3 + np.sqrt(3))*(om2*om2*om2*om2) + 48*(om2*om2*om2)*(16*(2 + np.sqrt(3)) + 13*(3 + np.sqrt(3))*om3) + om3*(3*(7 + 4*np.sqrt(3)) + 4*(9 + 5*np.sqrt(3))*om3) + om2*(6*(7 + 4*np.sqrt(3)) + 76*(9 + 5*np.sqrt(3))*om3 + 384*(2 + np.sqrt(3))*(om3*om3)) + 4*(om2*om2)*(19*(9 + 5*np.sqrt(3)) + 288*(2 + np.sqrt(3))*om3 + 78*(3 + np.sqrt(3))*(om3*om3))))/(216.*om1*(om1 + om2)*(om1 + om2 + om3)*(om1*om1 + 26*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-1] + (
(-5*(3*(7 + 4*np.sqrt(3)) + 8*(9 + 5*np.sqrt(3))*om2 + 4*(9 + 5*np.sqrt(3))*om3 + 4*(9*(2 + np.sqrt(3))*(om1*om1) + 9*(2 + np.sqrt(3))*om2*(om2 + om3) + om1*(2*(9 + 5*np.sqrt(3)) + 18*(2 + np.sqrt(3))*om2 + 9*(2 + np.sqrt(3))*om3))))/(108.*om1*(om1*om1 + 26*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-2] + (
(18*(2 + np.sqrt(3))*(om1*om1*om1) + 39*(7 + 4*np.sqrt(3))*(om2 + om3) + 52*(9 + 5*np.sqrt(3))*((om2 + om3)*(om2 + om3)) + 6*(om1*om1)*(9 + 5*np.sqrt(3) + 75*(2 + np.sqrt(3))*om2 + 75*(2 + np.sqrt(3))*om3) + om1*(3*(7 + 4*np.sqrt(3)) + 432*(2 + np.sqrt(3))*(om2*om2) + 8*om3*(13*(9 + 5*np.sqrt(3)) + 54*(2 + np.sqrt(3))*om3) + 8*om2*(13*(9 + 5*np.sqrt(3)) + 108*(2 + np.sqrt(3))*om3)))/(216.*(om1 + om2)*om3*(om1*om1 + 26*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-3] + (
-(18*(2 + np.sqrt(3))*(om1*om1*om1) + 6*(om1*om1)*(9 + 5*np.sqrt(3) + 75*(2 + np.sqrt(3))*om2) + 13*om2*(3*(7 + 4*np.sqrt(3)) + 4*(9 + 5*np.sqrt(3))*om2) + om1*(3*(7 + 4*np.sqrt(3)) + 8*om2*(13*(9 + 5*np.sqrt(3)) + 54*(2 + np.sqrt(3))*om2)))/(216.*om3*(om1 + om2 + om3)*(om1*om1 + 26*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-4]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def conservative_Nyström4AS(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_Nyström4AS, adaptive_step_Nyström4AS,
**kwargs)
def cons_or_diss_Nyström4AS(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_Nyström4AS, adaptive_step_Nyström4AS,
fixed_estimate_Nyström4AS, adaptive_estimate_Nyström4AS,
**kwargs)
# methods based on the idea $u_{n+k} = u_{n-k} + \int_{t_{n-k}}^{t_{n+k}} f$
def fixed_step_Leapfrog4(uu, ff, h):
u_new = uu[-4] + h * (
(
2.6666666666666665
) * ff[-1] + (
-1.3333333333333333
) * ff[-2] + (
2.6666666666666665
) * ff[-3]
)
return u_new
def fixed_estimate_Leapfrog4(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
u1int = uu[-4] + h * (
(
0.6304904416411566
) * ff[-1] + (
1.0498989176881848
) * ff[-2] + (
1.165034535284982
) * ff[-3] + (
0.36590097079086353
) * ff[-4]
)
u2int = uu[-4] + h * (
(
1.9134910398403246
) * ff[-1] + (
-0.2651766954659626
) * ff[-2] + (
1.9669099091594622
) * ff[-3] + (
0.1734508810609883
) * ff[-4]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def adaptive_step_Leapfrog4(uu, ff, h, old_omega):
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
u_new = uu[-4] + h * (
(
-((2 + om2 + om3)*(2 + om2 + om3)*(-12 + (-4 + om2)*om2 - (-4 + om3)*om3))/(12.*om1*(om1 + om2)*(om1 + om2 + om3))
) * ff[-1] + (
((2 + om2 + om3)*(2 + om2 + om3)*((2 + om2)*(-6 + 4*om1 + om2) - 2*(-2 + om1)*om3 - om3*om3))/(12.*om1*om2*(om2 + om3))
) * ff[-2] + (
((2 + om2 + om3)*(2 + om2 + om3)*(2*om1*(-4 + om2 + om3) - 4*(-3 + om2 + om3) + (om2 + om3)*(om2 + om3)))/(12.*om2*(om1 + om2)*om3)
) * ff[-3] + (
(-16*(3 + 2*om2) - (om2 - 3*om3)*((om2 + om3)*(om2 + om3)*(om2 + om3)) + om1*(-2*(om2*om2*om2) + 6*om2*(4 + om3*om3) + 4*(8 + om3*om3*om3)))/(12.*om3*(om2 + om3)*(om1 + om2 + om3))
) * ff[-4]
)
return u_new
def adaptive_estimate_Leapfrog4(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
u1int = uu[-4] + h * (
(
(21 - 12*np.sqrt(3) + 8*(9 - 5*np.sqrt(3))*om2 + 4*(9 - 5*np.sqrt(3))*om3 + 12*(9*(om1*om1*om1*om1) + 6*(om1*om1*om1)*(3 - np.sqrt(3) + 4*om2 + 2*om3) + 3*(om1*om1)*(6 - 3*np.sqrt(3) - 2*(-3 + np.sqrt(3))*om3 + 2*om2*(6 - 2*np.sqrt(3) + 3*om2 + 3*om3)) + om1*(9 - 5*np.sqrt(3) - 6*(-2 + np.sqrt(3))*om3 - 6*om2*(2*(-2 + np.sqrt(3)) + (-3 + np.sqrt(3))*om2 + (-3 + np.sqrt(3))*om3)) - 3*(om2 + om3)*(om2*om2*om2 + om2*om2*om3 - om3*om3*om3 + om2*(-2 + np.sqrt(3) - om3*om3))))/(432.*om1*(om1 + om2)*(om1 + om2 + om3))
) * ff[-1] + (
(3*(-7 + 4*np.sqrt(3)) + 8*(-9 + 5*np.sqrt(3))*om2 + 4*(-9 + 5*np.sqrt(3))*om3 + 4*(9*(om1*om1*om1*om1) + 18*(om1*om1*om1)*(2*om2 + om3) + 9*(om1*om1)*(-2 + np.sqrt(3) + 6*om2*(om2 + om3)) + om1*(2*(-9 + 5*np.sqrt(3)) + 18*(-2 + np.sqrt(3))*om2 + 36*(om2*om2*om2) + 54*(om2*om2)*om3 + 9*om3*(-2 + np.sqrt(3) - 2*(om3*om3))) + 9*(om2 + om3)*(om2*om2*om2 + om2*om2*om3 - om3*om3*om3 + om2*(-2 + np.sqrt(3) - om3*om3))))/(432.*om1*om2*(om2 + om3))
) * ff[-2] + (
(21 - 12*np.sqrt(3) + 4*(9 - 5*np.sqrt(3))*om2 + 4*(9 - 5*np.sqrt(3))*om3 + 4*(-9*(-2 + np.sqrt(3))*(om1*om1) - 9*(om1*om1*om1*om1) - 18*(om1*om1*om1)*(om2 + om3) + 9*((om2 + om3)*(om2 + om3)*(om2 + om3)*(om2 + om3)) + om1*(18 - 10*np.sqrt(3) - 9*(-2 + np.sqrt(3))*om3 + 9*(2*(om2*om2*om2) + 6*(om2*om2)*om3 + 2*(om3*om3*om3) + om2*(2 - np.sqrt(3) + 6*(om3*om3))))))/(432.*om2*(om1 + om2)*om3)
) * ff[-3] + (
(3*(-7 + 4*np.sqrt(3)) + 4*(-9 + 5*np.sqrt(3))*om2 + 4*(9*(-2 + np.sqrt(3))*(om1*om1) + 9*(om1*om1*om1*om1) + 18*(om1*om1*om1)*om2 - 9*(om2 - 3*om3)*((om2 + om3)*(om2 + om3)*(om2 + om3)) + om1*(-18*(om2*om2*om2) + 9*om2*(-2 + np.sqrt(3) + 6*(om3*om3)) + 2*(-9 + 5*np.sqrt(3) + 18*(om3*om3*om3)))))/(432.*om3*(om2 + om3)*(om1 + om2 + om3))
) * ff[-4]
)
u2int = uu[-4] + h * (
(
(3*(7 + 4*np.sqrt(3)) + 8*(9 + 5*np.sqrt(3))*om2 + 4*(9 + 5*np.sqrt(3))*om3 + 12*(9*(om1*om1*om1*om1) + 6*(om1*om1*om1)*(3 + np.sqrt(3) + 4*om2 + 2*om3) + 3*(om1*om1)*(3*(2 + np.sqrt(3)) + 2*(3 + np.sqrt(3))*om3 + 2*om2*(6 + 2*np.sqrt(3) + 3*om2 + 3*om3)) + om1*(9 + 5*np.sqrt(3) + 6*(2 + np.sqrt(3))*om3 + 6*om2*(2*(2 + np.sqrt(3)) + (3 + np.sqrt(3))*om2 + (3 + np.sqrt(3))*om3)) - 3*(om2 + om3)*(om2*om2*om2 + om2*om2*om3 - om3*om3*om3 - om2*(2 + np.sqrt(3) + om3*om3))))/(432.*om1*(om1 + om2)*(om1 + om2 + om3))
) * ff[-1] + (
-(3*(7 + 4*np.sqrt(3)) + 8*(9 + 5*np.sqrt(3))*om2 + 4*(9 + 5*np.sqrt(3))*om3 + 4*(-9*(om1*om1*om1*om1) - 18*(om1*om1*om1)*(2*om2 + om3) + 9*(om1*om1)*(2 + np.sqrt(3) - 6*om2*(om2 + om3)) - 9*(om2 + om3)*(om2*om2*om2 + om2*om2*om3 - om3*om3*om3 - om2*(2 + np.sqrt(3) + om3*om3)) + om1*(2*(9 + 5*np.sqrt(3)) + 9*(2 + np.sqrt(3))*om3 + 18*((2 + np.sqrt(3))*om2 - 2*(om2*om2*om2) - 3*(om2*om2)*om3 + om3*om3*om3))))/(432.*om1*om2*(om2 + om3))
) * ff[-2] + (
(3*(7 + 4*np.sqrt(3)) + 4*(9 + 5*np.sqrt(3))*om2 + 4*(9 + 5*np.sqrt(3))*om3 + 4*(9*(2 + np.sqrt(3))*(om1*om1) - 9*(om1*om1*om1*om1) - 18*(om1*om1*om1)*(om2 + om3) + 9*((om2 + om3)*(om2 + om3)*(om2 + om3)*(om2 + om3)) + om1*(2*(9 + 5*np.sqrt(3)) + 18*(om2*om2*om2) + 54*(om2*om2)*om3 + 9*om3*(2 + np.sqrt(3) + 2*(om3*om3)) + 9*om2*(2 + np.sqrt(3) + 6*(om3*om3)))))/(432.*om2*(om1 + om2)*om3)
) * ff[-3] + (
-(3*(7 + 4*np.sqrt(3)) + 4*(9 + 5*np.sqrt(3))*om2 + 4*(9*(2 + np.sqrt(3))*(om1*om1) - 9*(om1*om1*om1*om1) - 18*(om1*om1*om1)*om2 + 9*(om2 - 3*om3)*((om2 + om3)*(om2 + om3)*(om2 + om3)) + om1*(18*(om2*om2*om2) + 9*om2*(2 + np.sqrt(3) - 6*(om3*om3)) + 2*(9 + 5*np.sqrt(3) - 18*(om3*om3*om3)))))/(432.*om3*(om2 + om3)*(om1 + om2 + om3))
) * ff[-4]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def conservative_Leapfrog4(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
idx_u_old=-4,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_Leapfrog4, adaptive_step_Leapfrog4,
idx_u_old=idx_u_old,
**kwargs)
def cons_or_diss_Leapfrog4(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
idx_u_old=-4,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_Leapfrog4, adaptive_step_Leapfrog4,
fixed_estimate_Leapfrog4, adaptive_estimate_Leapfrog4,
idx_u_old=idx_u_old,
**kwargs)
#NOTE: This method does not work well with relaxation
def conservative_Leapfrog4mod(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
eta=etaL2, deta=detaL2,
return_gamma=False,
projection=False, relaxation=False,
adapt_dt=False, adapt_coefficients=False,
method=None, tol=1.e-14, maxiter=10000, maxsteps=10**12):
uu = [u0, u1, u2, u3]
ff = [f(u0), f(u1), f(u2), f(u3)]
tt = [t0, t1, t2, t3]
old_omega = [1.0, 1.0, 1.0, 1.0]
old_gamma = [1.0, 1.0, 1.0, 1.0]
h = t1 - t0
np.testing.assert_approx_equal(h, t2 - t1)
np.testing.assert_approx_equal(h, t3 - t2)
if relaxation and projection:
raise Exception("Use either relaxation or projection, not both.")
    if relaxation and method is None:
        method = "brentq"
    elif projection and method is None:
        method = "simplified Newton"
t = t3
gammas = [1.0, 1.0, 1.0, 1.0]
step = 0
while t < t_final and step < maxsteps:
step += 1
if relaxation and adapt_coefficients:
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
du_new = 8 * (24 + om3 * (-16 + 3*om3) + om2 * (-8 + 3*om3)) / (3*om1 * (om1 + om2) * (om1 + om2 + om3)) * ff[-1]
du_new = du_new - 8 * (3*om3 * (om2 + om3) - 8 * (-3 + om2 + 2*om3) + om1 * (-8 + 3*om3)) / (3 * om1 * om2 * (om2 + om3)) * ff[-2]
du_new = du_new + 8 * (3 * (om2 + om3)*(om2 + om3) - 8 * (-3 + 2*om2 + 2*om3) + om1 * (-8 + 3*om2 + 3*om3)) / (3*om2 * (om1 + om2) * om3) * ff[-3]
du_new = du_new + 4 * (16 * (-3 + 2*om2 + 3*om3) + 3 * (om2 + om3) * (om2 * (-2 + om3) + (-6 + om3) * om3) + om1 * (16 + 3*om2 * (-2 + om3) + 3 * (-4 + om3) * om3)) / (3 * om3 * (om2 + om3) * (om1 + om2 + om3)) * ff[-4]
u_new = uu[-4] + h * du_new
else:
u_new = uu[-4] + h * ((8.0/3.0) * ff[-1] - (4.0/3.0) * ff[-2] + (8.0/3.0) * ff[-3])
u_old = uu[-1]
eta_old = eta(u_old)
if projection:
gamma, u_new = conservative_projection_solve(eta, deta, u_old, eta_old, u_new, method, tol, maxiter)
elif relaxation:
gamma = conservative_relaxation_solve(eta, deta, u_old, eta_old, u_new, old_gamma[-1], method, tol, maxiter)
u_new = u_old + gamma * (u_new - u_old)
old_gamma[-4] = old_gamma[-3]
old_gamma[-3] = old_gamma[-2]
old_gamma[-2] = old_gamma[-1]
old_gamma[-1] = gamma
else:
gamma = 1.0
if return_gamma:
gammas.append(gamma)
uu.append(u_new)
if relaxation and adapt_dt:
t = tt[-1] + gamma * h
old_omega[-4] = old_omega[-3]
old_omega[-3] = old_omega[-2]
old_omega[-2] = old_omega[-1]
old_omega[-1] = gamma
if gamma < 1.0e-14:
raise Exception("gamma = %.2e is too small in step %d!" % (gamma, step))
else:
t = tt[-1] + h
tt.append(t)
ff[-4] = ff[-3]
ff[-3] = ff[-2]
ff[-2] = ff[-1]
ff[-1] = f(u_new)
if return_gamma:
return np.array(tt), uu, np.array(gammas)
else:
return np.array(tt), uu
# SSP LMMs with variable step size by Hadjimichael, Ketcheson, Lóczi, and Németh (2016)
def fixed_step_SSP32(uu, ff, h):
u_new = 0.25 * uu[-3] + 0.75 * (uu[-1] + 2 * h * ff[-1])
return u_new
def fixed_estimate_SSP32(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
eta_est = 0.25 * old_eta[-3] + 0.75 * (old_eta[-1] + 2 * h * old_deta_f[-1])
return eta_est
def adaptive_step_SSP32(uu, ff, h, old_omega):
Omega = old_omega[-1] + old_omega[-2]
Omega2 = Omega * Omega
u_new = (1.0/Omega2) * uu[-3] + (Omega2-1)/Omega2 * (uu[-1] + Omega/(Omega-1) * h * ff[-1])
return u_new
def adaptive_estimate_SSP32(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
Omega = old_omega[-1] + old_omega[-2]
Omega2 = Omega * Omega
eta_est = (1.0/Omega2) * old_eta[-3] + (Omega2-1)/Omega2 * (old_eta[-1] + Omega/(Omega-1) * h * old_deta_f[-1])
return eta_est
def adaptive_u_old_SSP32(old_omega):
Omega = old_omega[-1] + old_omega[-2]
Omega2 = Omega * Omega
return [1.0/Omega2, 0.0, (Omega2-1)/Omega2]
def conservative_SSP32(f, t_final, t0, u0, t1, u1, t2, u2,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1, t2], [u0, u1, u2],
fixed_step_SSP32, adaptive_step_SSP32,
**kwargs)
def cons_or_diss_SSP32(f, t_final, t0, u0, t1, u1, t2, u2,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1, t2], [u0, u1, u2],
fixed_step_SSP32, adaptive_step_SSP32,
fixed_estimate_SSP32, adaptive_estimate_SSP32,
**kwargs)
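# A minimal usage sketch (a demo, not part of the original code): integrate
# the harmonic oscillator u' = (u[1], -u[0]), whose squared L2 norm is
# conserved, with the relaxation variant of SSP32. In practice the starting
# values would come from a high-order one-step method instead of the crude
# Euler steps used here.
def _demo_conservative_SSP32():
    f = lambda u: np.array([u[1], -u[0]])
    h = 0.05
    u0 = np.array([1.0, 0.0])
    u1 = u0 + h * f(u0)  # demo-only starting values
    u2 = u1 + h * f(u1)
    return conservative_SSP32(f, 10.0, 0.0, u0, h, u1, 2 * h, u2,
                              relaxation=True, adapt_dt=True)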
def fixed_step_SSP43(uu, ff, h):
u_new = (16./27.) * (uu[-1] + 3 * h * ff[-1]) + (11./27.) * (uu[-4] + (12./11.) * h * ff[-4])
return u_new
def fixed_estimate_SSP43(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
eta_est = (16./27.) * (old_eta[-1] + 3 * h * old_deta_f[-1]) + (11./27.) * (old_eta[-4] + (12./11.) * h * old_deta_f[-4])
return eta_est
def adaptive_step_SSP43(uu, ff, h, old_omega):
Omega = old_omega[-1] + old_omega[-2] + old_omega[-3]
Omega3 = Omega * Omega * Omega
u_new = (
(Omega+1) * (Omega+1) * (Omega-2) / Omega3
) * ( uu[-1] + Omega/(Omega-2) * h * ff[-1] ) + (
(3*Omega + 2) / Omega3
) * ( uu[-4] + Omega*(Omega+1)/(3*Omega+2) * h * ff[-4] )
return u_new
def adaptive_estimate_SSP43(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
Omega = old_omega[-1] + old_omega[-2] + old_omega[-3]
Omega3 = Omega * Omega * Omega
eta_est = (
(Omega+1) * (Omega+1) * (Omega-2) / Omega3
) * ( old_eta[-1] + Omega/(Omega-2) * h * old_deta_f[-1] ) + (
(3*Omega + 2) / Omega3
) * ( old_eta[-4] + Omega*(Omega+1)/(3*Omega+2) * h * old_deta_f[-4] )
return eta_est
def adaptive_u_old_SSP43(old_omega):
Omega = old_omega[-1] + old_omega[-2] + old_omega[-3]
Omega3 = Omega * Omega * Omega
return [(3*Omega + 2) / Omega3, 0.0, 0.0, (Omega+1) * (Omega+1) * (Omega-2) / Omega3]
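# The adaptive_u_old_* weights form an affine combination of previous solution
# values: they sum to one for any positive step size ratios. A quick demo
# sketch (not part of the original code):
def _check_u_old_weights():
    old_omega = [0.8, 1.2, 1.1, 0.9]
    for weights in (adaptive_u_old_SSP32(old_omega),
                    adaptive_u_old_SSP43(old_omega)):
        np.testing.assert_allclose(sum(weights), 1.0)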
def conservative_SSP43(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_SSP43, adaptive_step_SSP43,
**kwargs)
def cons_or_diss_SSP43(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_SSP43, adaptive_step_SSP43,
fixed_estimate_SSP43, adaptive_estimate_SSP43,
**kwargs)
# SSP LMMs with variable step size by Mohammadi, Arévalo, and Führer (2019), based on Arévalo & Söderlind (2017)
def fixed_step_SSP32AS(uu, ff, h):
u_new = (
0.75
) * uu[-1] + (
0.25
) * uu[-3] + h * (
(
1.5
) * ff[-1]
)
return u_new
def fixed_estimate_SSP32AS(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
u1int = (
0.9375
) * uu[-1] + (
0.0625
) * uu[-3] + h * (
(
0.625
) * ff[-1]
)
eta_est = old_eta[-1] + h * ( 1 * np.dot(deta(u1int), f(u1int)) )
return eta_est
def adaptive_step_SSP32AS(uu, ff, h, old_omega):
om2 = old_omega[-2]
om1 = old_omega[-1]
u_new = (
1 - 1/((om1 + om2)*(om1 + om2))
) * uu[-1] + (
1/((om1 + om2)*(om1 + om2))
) * uu[-3] + h * (
(
1 + 1/(om1 + om2)
) * ff[-1]
)
return u_new
def adaptive_estimate_SSP32AS(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
om2 = old_omega[-2]
om1 = old_omega[-1]
u1int = (
1 - 1/(4.*((om1 + om2)*(om1 + om2)))
) * uu[-1] + (
1/(4.*((om1 + om2)*(om1 + om2)))
) * uu[-3] + h * (
(
(2 + 1/(om1 + om2))/4.
) * ff[-1]
)
eta_est = old_eta[-1] + h * ( 1 * np.dot(deta(u1int), f(u1int)) )
return eta_est
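# The estimate functions above approximate eta at the new time by integrating
# d/dt eta(u) = deta(u) . f(u) over the step, evaluating the integrand at
# interior stage values (u1int, u2int). The single interior node with weight
# one here, and the 0.5/0.5 weights with sqrt(3) factors in the higher-order
# estimates below, are consistent with one- and two-node Gauss-Legendre rules.
# A demo sketch (not part of the original code): for u' = A u with
# skew-symmetric A and eta(u) = u . u, the integrand vanishes identically, so
# the estimate reproduces the conserved value exactly (up to roundoff).
def _demo_estimate_SSP32AS():
    A = np.array([[0.0, 1.0], [-1.0, 0.0]])
    f = lambda u: A @ u
    eta = lambda u: np.dot(u, u)
    deta = lambda u: 2.0 * u
    h = 0.1
    uu = [np.array([np.cos(t), -np.sin(t)]) for t in (0.0, h, 2 * h)]
    ff = [f(u) for u in uu]
    old_eta = [eta(u) for u in uu]
    old_deta_f = [np.dot(deta(u), f(u)) for u in uu]
    return fixed_estimate_SSP32AS(eta, deta, f, uu, ff,
                                  old_eta, old_deta_f, -1, h)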
def conservative_SSP32AS(f, t_final, t0, u0, t1, u1, t2, u2,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1, t2], [u0, u1, u2],
fixed_step_SSP32AS, adaptive_step_SSP32AS,
**kwargs)
def cons_or_diss_SSP32AS(f, t_final, t0, u0, t1, u1, t2, u2,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1, t2], [u0, u1, u2],
fixed_step_SSP32AS, adaptive_step_SSP32AS,
fixed_estimate_SSP32AS, adaptive_estimate_SSP32AS,
**kwargs)
def fixed_step_SSP43AS(uu, ff, h):
u_new = (
0.5925925925925926
) * uu[-1] + (
0.4074074074074074
) * uu[-4] + h * (
(
1.7777777777777777
) * ff[-1] + (
0.4444444444444444
) * ff[-4]
)
return u_new
def fixed_estimate_SSP43AS(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
u1int = (
0.9844148679880743
) * uu[-1] + (
0.015585132011925783
) * uu[-4] + h * (
(
0.2421455965461624
) * ff[-1] + (
0.01593466489480193
) * ff[-4]
)
u2int = (
0.7563258727526666
) * uu[-1] + (
0.2436741272473335
) * uu[-4] + h * (
(
1.2578544034538375
) * ff[-1] + (
0.26184311288297585
) * ff[-4]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def adaptive_step_SSP43AS(uu, ff, h, old_omega):
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
u_new = (
((-2 + om1 + om2 + om3)*((1 + om1 + om2 + om3)*(1 + om1 + om2 + om3)))/((om1 + om2 + om3)*(om1 + om2 + om3)*(om1 + om2 + om3))
) * uu[-1] + (
(2 + 3*(om1 + om2 + om3))/((om1 + om2 + om3)*(om1 + om2 + om3)*(om1 + om2 + om3))
) * uu[-4] + h * (
(
((1 + om1 + om2 + om3)*(1 + om1 + om2 + om3))/((om1 + om2 + om3)*(om1 + om2 + om3))
) * ff[-1] + (
(1 + om1 + om2 + om3)/((om1 + om2 + om3)*(om1 + om2 + om3))
) * ff[-4]
)
return u_new
def adaptive_estimate_SSP43AS(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
u1int = (
1 + (-9 + 5*np.sqrt(3))/(18.*((om1 + om2 + om3)*(om1 + om2 + om3)*(om1 + om2 + om3))) + (-2 + np.sqrt(3))/(2.*((om1 + om2 + om3)*(om1 + om2 + om3)))
) * uu[-1] + (
(9 - 5*np.sqrt(3) + 18*(om1 + om2 + om3) - 9*np.sqrt(3)*(om1 + om2 + om3))/(18.*((om1 + om2 + om3)*(om1 + om2 + om3)*(om1 + om2 + om3)))
) * uu[-4] + h * (
(
(-6*(-3 + np.sqrt(3)) + (9 - 5*np.sqrt(3))/((om1 + om2 + om3)*(om1 + om2 + om3)) - (12*(-2 + np.sqrt(3)))/(om1 + om2 + om3))/36.
) * ff[-1] + (
(9 - 5*np.sqrt(3) - 6*(-2 + np.sqrt(3))*(om1 + om2 + om3))/(36.*((om1 + om2 + om3)*(om1 + om2 + om3)))
) * ff[-4]
)
u2int = (
-(9 + 5*np.sqrt(3) + 9*(2 + np.sqrt(3))*(om1 + om2 + om3) - 18*((om1 + om2 + om3)*(om1 + om2 + om3)*(om1 + om2 + om3)))/(18.*((om1 + om2 + om3)*(om1 + om2 + om3)*(om1 + om2 + om3)))
) * uu[-1] + (
(9 + 5*np.sqrt(3) + 18*(om1 + om2 + om3) + 9*np.sqrt(3)*(om1 + om2 + om3))/(18.*((om1 + om2 + om3)*(om1 + om2 + om3)*(om1 + om2 + om3)))
) * uu[-4] + h * (
(
(6*(3 + np.sqrt(3)) + (9 + 5*np.sqrt(3))/((om1 + om2 + om3)*(om1 + om2 + om3)) + (12*(2 + np.sqrt(3)))/(om1 + om2 + om3))/36.
) * ff[-1] + (
(9 + 5*np.sqrt(3) + 6*(2 + np.sqrt(3))*(om1 + om2 + om3))/(36.*((om1 + om2 + om3)*(om1 + om2 + om3)))
) * ff[-4]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def conservative_SSP43AS(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_SSP43AS, adaptive_step_SSP43AS,
**kwargs)
def cons_or_diss_SSP43AS(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_SSP43AS, adaptive_step_SSP43AS,
fixed_estimate_SSP43AS, adaptive_estimate_SSP43AS,
**kwargs)
# extrapolated BDF (eBDF) methods with variable step size
def fixed_step_eBDF2(uu, ff, h):
u_new = (
1.3333333333333333
) * uu[-1] + (
-0.3333333333333333
) * uu[-2] + h * (
(
1.3333333333333333
) * ff[-1] + (
-0.6666666666666666
) * ff[-2]
)
return u_new
def adaptive_step_eBDF2(uu, ff, h, old_omega):
om1 = old_omega[-1]
u_new = (
((1 + om1)*(1 + om1))/(om1*(2 + om1))
) * uu[-1] + (
-(1/(2*om1 + om1*om1))
) * uu[-2] + h * (
(
((1 + om1)*(1 + om1))/(om1*(2 + om1))
) * ff[-1] + (
-((1 + om1)/(2*om1 + om1*om1))
) * ff[-2]
)
return u_new
def conservative_eBDF2(f, t_final, t0, u0, t1, u1,
idx_u_old=-1,
eta=etaL2, deta=detaL2,
return_gamma=False,
projection=False, relaxation=False,
adapt_dt=False, adapt_coefficients=False,
method=None, tol=1.e-14, maxiter=10000, maxsteps=10**12):
return conservative_LMM(f, t_final, [t0, t1], [u0, u1],
fixed_step_eBDF2, adaptive_step_eBDF2,
idx_u_old,
eta, deta,
return_gamma,
projection, relaxation,
adapt_dt, adapt_coefficients,
method, tol, maxiter, maxsteps)
def fixed_step_eBDF3(uu, ff, h):
u_new = (
1.6363636363636365
) * uu[-1] + (
-0.8181818181818182
) * uu[-2] + (
0.18181818181818182
) * uu[-3] + h * (
(
1.6363636363636365
) * ff[-1] + (
-1.6363636363636365
) * ff[-2] + (
0.5454545454545454
) * ff[-3]
)
return u_new
def adaptive_step_eBDF3(uu, ff, h, old_omega):
om2 = old_omega[-2]
om1 = old_omega[-1]
u_new = (
((1 + om1)*(1 + om1)*((1 + om1 + om2)*(1 + om1 + om2)))/(om1*(om1 + om2)*(3 + 2*om2 + om1*(4 + om1 + om2)))
) * uu[-1] + (
-(((1 + om1 + om2)*(1 + om1 + om2))/(om1*om2*(3 + 2*om2 + om1*(4 + om1 + om2))))
) * uu[-2] + (
((1 + om1)*(1 + om1))/(om2*(om1 + om2)*(3 + 2*om2 + om1*(4 + om1 + om2)))
) * uu[-3] + h * (
(
((1 + om1)*(1 + om1)*((1 + om1 + om2)*(1 + om1 + om2)))/(om1*(om1 + om2)*(3 + 2*om2 + om1*(4 + om1 + om2)))
) * ff[-1] + (
-(((1 + om1)*((1 + om1 + om2)*(1 + om1 + om2)))/(om1*om2*(3 + 2*om2 + om1*(4 + om1 + om2))))
) * ff[-2] + (
((1 + om1)*(1 + om1)*(1 + om1 + om2))/(om2*(om1 + om2)*(3 + 2*om2 + om1*(4 + om1 + om2)))
) * ff[-3]
)
return u_new
def conservative_eBDF3(f, t_final, t0, u0, t1, u1, t2, u2,
idx_u_old=-1,
eta=etaL2, deta=detaL2,
return_gamma=False,
projection=False, relaxation=False,
adapt_dt=False, adapt_coefficients=False,
method=None, tol=1.e-14, maxiter=10000, maxsteps=10**12):
return conservative_LMM(f, t_final, [t0, t1, t2], [u0, u1, u2],
fixed_step_eBDF3, adaptive_step_eBDF3,
idx_u_old,
eta, deta,
return_gamma,
projection, relaxation,
adapt_dt, adapt_coefficients,
method, tol, maxiter, maxsteps)
def fixed_step_eBDF4(uu, ff, h):
u_new = (
1.92
) * uu[-1] + (
-1.44
) * uu[-2] + (
0.64
) * uu[-3] + (
-0.12
) * uu[-4] + h * (
(
1.92
) * ff[-1] + (
-2.88
) * ff[-2] + (
1.92
) * ff[-3] + (
-0.48
) * ff[-4]
)
return u_new
def adaptive_step_eBDF4(uu, ff, h, old_omega):
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
u_new = (
((1 + om1)*(1 + om1)*((1 + om1 + om2)*(1 + om1 + om2))*((1 + om1 + om2 + om3)*(1 + om1 + om2 + om3)))/(om1*(om1 + om2)*(om1 + om2 + om3)*(4 + om1*om1*om1 + 3*om3 + 2*om2*(3 + om2 + om3) + om1*om1*(6 + 2*om2 + om3) + om1*(9 + 4*om3 + om2*(8 + om2 + om3))))
) * uu[-1] + (
-(((1 + om1 + om2)*(1 + om1 + om2)*((1 + om1 + om2 + om3)*(1 + om1 + om2 + om3)))/(om1*om2*(om2 + om3)*(4 + om1*om1*om1 + 3*om3 + 2*om2*(3 + om2 + om3) + om1*om1*(6 + 2*om2 + om3) + om1*(9 + 4*om3 + om2*(8 + om2 + om3)))))
) * uu[-2] + (
((1 + om1)*(1 + om1)*((1 + om1 + om2 + om3)*(1 + om1 + om2 + om3)))/(om2*(om1 + om2)*om3*(4 + om1*om1*om1 + 3*om3 + 2*om2*(3 + om2 + om3) + om1*om1*(6 + 2*om2 + om3) + om1*(9 + 4*om3 + om2*(8 + om2 + om3))))
) * uu[-3] + (
-(((1 + om1)*(1 + om1)*((1 + om1 + om2)*(1 + om1 + om2)))/(om3*(om2 + om3)*(om1 + om2 + om3)*(4 + om1*om1*om1 + 3*om3 + 2*om2*(3 + om2 + om3) + om1*om1*(6 + 2*om2 + om3) + om1*(9 + 4*om3 + om2*(8 + om2 + om3)))))
) * uu[-4] + h * (
(
((1 + om1)*(1 + om1)*((1 + om1 + om2)*(1 + om1 + om2))*((1 + om1 + om2 + om3)*(1 + om1 + om2 + om3)))/(om1*(om1 + om2)*(om1 + om2 + om3)*(4 + om1*om1*om1 + 3*om3 + 2*om2*(3 + om2 + om3) + om1*om1*(6 + 2*om2 + om3) + om1*(9 + 4*om3 + om2*(8 + om2 + om3))))
) * ff[-1] + (
-(((1 + om1)*((1 + om1 + om2)*(1 + om1 + om2))*((1 + om1 + om2 + om3)*(1 + om1 + om2 + om3)))/(om1*om2*(om2 + om3)*(4 + om1*om1*om1 + 3*om3 + 2*om2*(3 + om2 + om3) + om1*om1*(6 + 2*om2 + om3) + om1*(9 + 4*om3 + om2*(8 + om2 + om3)))))
) * ff[-2] + (
((1 + om1)*(1 + om1)*(1 + om1 + om2)*((1 + om1 + om2 + om3)*(1 + om1 + om2 + om3)))/(om2*(om1 + om2)*om3*(4 + om1*om1*om1 + 3*om3 + 2*om2*(3 + om2 + om3) + om1*om1*(6 + 2*om2 + om3) + om1*(9 + 4*om3 + om2*(8 + om2 + om3))))
) * ff[-3] + (
-(((1 + om1)*(1 + om1)*((1 + om1 + om2)*(1 + om1 + om2))*(1 + om1 + om2 + om3))/(om3*(om2 + om3)*(om1 + om2 + om3)*(4 + om1*om1*om1 + 3*om3 + 2*om2*(3 + om2 + om3) + om1*om1*(6 + 2*om2 + om3) + om1*(9 + 4*om3 + om2*(8 + om2 + om3)))))
) * ff[-4]
)
return u_new
def conservative_eBDF4(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_eBDF4, adaptive_step_eBDF4,
**kwargs)
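# The eBDF family passes the same fixed step size reduction check (a demo
# sketch, using the helper defined near the SSP32 methods above):
def _check_eBDF_reductions():
    _check_reduces_to_fixed(fixed_step_eBDF2, adaptive_step_eBDF2, 2)
    _check_reduces_to_fixed(fixed_step_eBDF3, adaptive_step_eBDF3, 3)
    _check_reduces_to_fixed(fixed_step_eBDF4, adaptive_step_eBDF4, 4)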
# extrapolated BDF (eBDF) methods in the variable step size formulation of Arévalo & Söderlind (2017)
def fixed_step_eBDF2AS(uu, ff, h):
u_new = (
1.3333333333333333
) * uu[-1] + (
-0.3333333333333333
) * uu[-2] + h * (
(
1.3333333333333333
) * ff[-1] + (
-0.6666666666666666
) * ff[-2]
)
return u_new
def fixed_estimate_eBDF2AS(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
u1int = (
1.0833333333333333
) * uu[-1] + (
-0.08333333333333333
) * uu[-2] + h * (
(
0.5833333333333334
) * ff[-1] + (
-0.16666666666666666
) * ff[-2]
)
eta_est = old_eta[-1] + h * ( 1 * np.dot(deta(u1int), f(u1int)) )
return eta_est
def adaptive_step_eBDF2AS(uu, ff, h, old_omega):
om1 = old_omega[-1]
u_new = (
1 + 1/(3.*(om1*om1))
) * uu[-1] + (
-1/(3.*(om1*om1))
) * uu[-2] + h * (
(
1 + 1/(3.*om1)
) * ff[-1] + (
-2/(3.*om1)
) * ff[-2]
)
return u_new
def adaptive_estimate_eBDF2AS(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
om1 = old_omega[-1]
u1int = (
1 + 1/(12.*(om1*om1))
) * uu[-1] + (
-1/(12.*(om1*om1))
) * uu[-2] + h * (
(
(6 + 1/om1)/12.
) * ff[-1] + (
-1/(6.*om1)
) * ff[-2]
)
eta_est = old_eta[-1] + h * ( 1 * np.dot(deta(u1int), f(u1int)) )
return eta_est
def conservative_eBDF2AS(f, t_final, t0, u0, t1, u1,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1], [u0, u1],
fixed_step_eBDF2AS, adaptive_step_eBDF2AS,
**kwargs)
def cons_or_diss_eBDF2AS(f, t_final, t0, u0, t1, u1,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1], [u0, u1],
fixed_step_eBDF2AS, adaptive_step_eBDF2AS,
fixed_estimate_eBDF2AS, adaptive_estimate_eBDF2AS,
**kwargs)
def fixed_step_eBDF3AS(uu, ff, h):
u_new = (
1.6363636363636365
) * uu[-1] + (
-0.8181818181818182
) * uu[-2] + (
0.18181818181818182
) * uu[-3] + h * (
(
1.6363636363636365
) * ff[-1] + (
-1.6363636363636365
) * ff[-2] + (
0.5454545454545454
) * ff[-3]
)
return u_new
def fixed_estimate_eBDF3AS(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
u1int = (
1.0244164888675966
) * uu[-1] + (
-0.030134742440450477
) * uu[-2] + (
0.005718253572853871
) * uu[-3] + h * (
(
0.23574135427278373
) * ff[-1] + (
-0.060269484880900955
) * ff[-2] + (
0.01715476071856161
) * ff[-3]
)
u2int = (
1.3808865414354339
) * uu[-1] + (
-0.4850167727110647
) * uu[-2] + (
0.104130231275631
) * uu[-3] + h * (
(
1.1695616760302463
) * ff[-1] + (
-0.9700335454221294
) * ff[-2] + (
0.31239069382689294
) * ff[-3]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def adaptive_step_eBDF3AS(uu, ff, h, old_omega):
om2 = old_omega[-2]
om1 = old_omega[-1]
u_new = (
(2*(-2 + om1)*(om1*om1)*((1 + om1)*(1 + om1)) - 2*(-2 + om1)*om1*((1 + om1)*(1 + om1))*om2 + 5*(1 + 3*om1 + 4*(om1*om1*om1))*(om2*om2) + 8*(1 + 3*(om1*om1))*(om2*om2*om2))/(2.*(om1*om1)*(om1 + om2)*(om1*om1 - 2*om1*om2 + 12*(om2*om2)))
) * uu[-1] + (
(om1 + om1*om1 - 7*om1*om2 - om2*(5 + 8*om2))/(2.*(om1*om1)*(om1*om1 - 2*om1*om2 + 12*(om2*om2)))
) * uu[-2] + (
(3 + 5*om1)/(2.*(om1 + om2)*(om1*om1 - 2*om1*om2 + 12*(om2*om2)))
) * uu[-3] + h * (
(
(2*((om1 + om1*om1)*(om1 + om1*om1)) - 2*om1*((1 + om1)*(1 + om1))*om2 + 5*(1 + om1*(3 + 4*om1))*(om2*om2) + 8*(1 + 3*om1)*(om2*om2*om2))/(2.*om1*(om1 + om2)*(om1*om1 - 2*om1*om2 + 12*(om2*om2)))
) * ff[-1] + (
(om1 + om1*om1 - 5*om2 - 7*om1*om2 - 8*(om2*om2))/(om1*om1*om1 - 2*(om1*om1)*om2 + 12*om1*(om2*om2))
) * ff[-2] + (
(3*(3 + 5*om1)*om2)/(2.*(om1 + om2)*(om1*om1 - 2*om1*om2 + 12*(om2*om2)))
) * ff[-3]
)
return u_new
def adaptive_estimate_eBDF3AS(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
om2 = old_omega[-2]
om1 = old_omega[-1]
u1int = (
(4*(om1*om1)*(-9 + 5*np.sqrt(3) + 9*om1*(-2 + np.sqrt(3) + 2*(om1*om1))) - 4*om1*(-9 + 5*np.sqrt(3) + 9*om1*(-2 + np.sqrt(3) + 2*(om1*om1)))*om2 + 5*(9 - 5*np.sqrt(3) + 18*om1*(2 - np.sqrt(3) + 8*(om1*om1)))*(om2*om2) - 48*(-2 + np.sqrt(3) - 18*(om1*om1))*(om2*om2*om2))/(72.*(om1*om1)*(om1 + om2)*(om1*om1 - 2*om1*om2 + 12*(om2*om2)))
) * uu[-1] + (
(-6*(-2 + np.sqrt(3))*(om1*om1) + om1*(9 - 5*np.sqrt(3) + 42*(-2 + np.sqrt(3))*om2) + om2*(5*(-9 + 5*np.sqrt(3)) + 48*(-2 + np.sqrt(3))*om2))/(72.*(om1*om1)*(om1*om1 - 2*om1*om2 + 12*(om2*om2)))
) * uu[-2] + (
(9 - 5*np.sqrt(3) - 10*(-2 + np.sqrt(3))*om1)/(24.*(om1 + om2)*(om1*om1 - 2*om1*om2 + 12*(om2*om2)))
) * uu[-3] + h * (
(
-(20*(-2 + np.sqrt(3) + 3*(-3 + np.sqrt(3))*om1) + (3*(-9 + 5*np.sqrt(3) + 10*(-2 + np.sqrt(3))*om1))/(om1 + om2) + ((-9 + 5*np.sqrt(3) + 10*(-2 + np.sqrt(3))*om1)*(7*om1 - 11*om2))/(om1*om1 - 2*om1*om2 + 12*(om2*om2)))/(360.*om1)
) * ff[-1] + (
(-6*(-2 + np.sqrt(3))*(om1*om1) + om1*(9 - 5*np.sqrt(3) + 42*(-2 + np.sqrt(3))*om2) + om2*(5*(-9 + 5*np.sqrt(3)) + 48*(-2 + np.sqrt(3))*om2))/(36.*om1*(om1*om1 - 2*om1*om2 + 12*(om2*om2)))
) * ff[-2] + (
((9 - 5*np.sqrt(3) - 10*(-2 + np.sqrt(3))*om1)*om2)/(8.*(om1 + om2)*(om1*om1 - 2*om1*om2 + 12*(om2*om2)))
) * ff[-3]
)
u2int = (
(4*(om1*om1)*(-9 - 5*np.sqrt(3) - 9*(2 + np.sqrt(3))*om1 + 18*(om1*om1*om1)) + 4*om1*(9 + 5*np.sqrt(3) + 9*om1*(2 + np.sqrt(3) - 2*(om1*om1)))*om2 + 5*(9 + 5*np.sqrt(3) + 18*om1*(2 + np.sqrt(3) + 8*(om1*om1)))*(om2*om2) + 48*(2 + np.sqrt(3) + 18*(om1*om1))*(om2*om2*om2))/(72.*(om1*om1)*(om1 + om2)*(om1*om1 - 2*om1*om2 + 12*(om2*om2)))
) * uu[-1] + (
(6*(2 + np.sqrt(3))*(om1*om1) + om1*(9 + 5*np.sqrt(3) - 42*(2 + np.sqrt(3))*om2) - om2*(5*(9 + 5*np.sqrt(3)) + 48*(2 + np.sqrt(3))*om2))/(72.*(om1*om1)*(om1*om1 - 2*om1*om2 + 12*(om2*om2)))
) * uu[-2] + (
(9 + 5*np.sqrt(3) + 10*(2 + np.sqrt(3))*om1)/(24.*(om1 + om2)*(om1*om1 - 2*om1*om2 + 12*(om2*om2)))
) * uu[-3] + h * (
(
(20*(2 + np.sqrt(3) + 3*(3 + np.sqrt(3))*om1) + (3*(9 + 5*np.sqrt(3) + 10*(2 + np.sqrt(3))*om1))/(om1 + om2) + ((9 + 5*np.sqrt(3) + 10*(2 + np.sqrt(3))*om1)*(7*om1 - 11*om2))/(om1*om1 - 2*om1*om2 + 12*(om2*om2)))/(360.*om1)
) * ff[-1] + (
(6*(2 + np.sqrt(3))*(om1*om1) + om1*(9 + 5*np.sqrt(3) - 42*(2 + np.sqrt(3))*om2) - om2*(5*(9 + 5*np.sqrt(3)) + 48*(2 + np.sqrt(3))*om2))/(36.*om1*(om1*om1 - 2*om1*om2 + 12*(om2*om2)))
) * ff[-2] + (
((9 + 5*np.sqrt(3) + 10*(2 + np.sqrt(3))*om1)*om2)/(8.*(om1 + om2)*(om1*om1 - 2*om1*om2 + 12*(om2*om2)))
) * ff[-3]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def conservative_eBDF3AS(f, t_final, t0, u0, t1, u1, t2, u2,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1, t2], [u0, u1, u2],
fixed_step_eBDF3AS, adaptive_step_eBDF3AS,
**kwargs)
def cons_or_diss_eBDF3AS(f, t_final, t0, u0, t1, u1, t2, u2,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1, t2], [u0, u1, u2],
fixed_step_eBDF3AS, adaptive_step_eBDF3AS,
fixed_estimate_eBDF3AS, adaptive_estimate_eBDF3AS,
**kwargs)
def fixed_step_eBDF4AS(uu, ff, h):
u_new = (
1.92
) * uu[-1] + (
-1.44
) * uu[-2] + (
0.64
) * uu[-3] + (
-0.12
) * uu[-4] + h * (
(
1.92
) * ff[-1] + (
-2.88
) * ff[-2] + (
1.92
) * ff[-3] + (
-0.48
) * ff[-4]
)
return u_new
def fixed_estimate_eBDF4AS(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
u1int = (
1.031595708345322
) * uu[-1] + (
-0.045873800526233105
) * uu[-2] + (
0.01731545426764106
) * uu[-3] + (
-0.003037362086729975
) * uu[-4] + h * (
(
0.2429205737505091
) * ff[-1] + (
-0.09174760105246621
) * ff[-2] + (
0.05194636280292318
) * ff[-3] + (
-0.0121494483469199
) * ff[-4]
)
u2int = (
1.5346388595559126
) * uu[-1] + (
-0.8220891624367299
) * uu[-2] + (
0.35249936054717373
) * uu[-3] + (
-0.06504905766635645
) * uu[-4] + h * (
(
1.3233139941507255
) * ff[-1] + (
-1.6441783248734598
) * ff[-2] + (
1.0574980816415214
) * ff[-3] + (
-0.2601962306654258
) * ff[-4]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def adaptive_step_eBDF4AS(uu, ff, h, old_omega):
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
u_new = (
-((3*(om2*om2)*(om1 + om2)*((1 + om1 + om2)*(1 + om1 + om2))*(-4*(om1*om1*om1*om1) - om1*om1*om1*(-8 + om2) + 3*om1*om2 + om2*om2 + 3*(om1*om1)*(4 + (-2 + om2)*om2)) - 3*om2*(om1 + om2)*((1 + om1 + om2)*(1 + om1 + om2))*(-4*(om1*om1*om1*om1) - om1*om1*om1*(-8 + om2) + 3*om1*om2 + om2*om2 + 3*(om1*om1)*(4 + (-2 + om2)*om2))*om3 + 7*(2*(-3 + om1)*(om1*om1*om1)*((1 + om1)*(1 + om1)*(1 + om1)) + 2*(om1*om1)*((1 + om1)*(1 + om1))*(-3 + 5*(-2 + om1)*om1)*om2 + 12*om1*((1 + om1)*(1 + om1))*(om2*om2) + (7 + om1*(52 + 78*om1 + 23*(om1*om1*om1)))*(om2*om2*om2) + 19*(1 + 3*om1 + 4*(om1*om1*om1))*(om2*om2*om2*om2) + 15*(1 + 3*(om1*om1))*(om2*om2*om2*om2*om2))*(om3*om3) + 11*(4*(om1*om1)*((1 + om1)*(1 + om1))*(-1 + (-2 + om1)*om1) + 4*om1*((-1 + om1*om1)*(-1 + om1*om1))*om2 + (5 + om1*(36 + 54*om1 + 17*(om1*om1*om1)))*(om2*om2) + 20*(1 + 3*om1 + 4*(om1*om1*om1))*(om2*om2*om2) + 21*(1 + 3*(om1*om1))*(om2*om2*om2*om2))*(om3*om3*om3) + 15*(2*(-2 + om1)*(om1*om1)*((1 + om1)*(1 + om1)) - 2*(-2 + om1)*om1*((1 + om1)*(1 + om1))*om2 + 5*(1 + 3*om1 + 4*(om1*om1*om1))*(om2*om2) + 8*(1 + 3*(om1*om1))*(om2*om2*om2))*(om3*om3*om3*om3))/(om1*om1*(om1 + om2)*(om1 + om2 + om3)*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*(om1 + om2)*(om1 + 2*om2)*om3 - (14*(om1*om1*om1) + 30*(om1*om1)*om2 - 113*om1*(om2*om2) + 333*(om2*om2*om2))*(om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3))))
) * uu[-1] + (
(3*om2*((om1 + om2)*(om1 + om2))*((1 + om1 + om2)*(1 + om1 + om2)) - 3*(om1 + om2)*((1 + om1 + om2)*(1 + om1 + om2))*(om1 + 2*om2)*om3 + (-(om1*(1 + om1)*(11 + 18*om1)) + (55 + om1*(116 + 75*om1))*om2 + (145 + 204*om1)*(om2*om2) + 111*(om2*om2*om2))*(om3*om3) - 15*(om1 + om1*om1 - 7*om1*om2 - om2*(5 + 8*om2))*(om3*om3*om3))/(om1*om1*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*(om1 + om2)*(om1 + 2*om2)*om3 - (14*(om1*om1*om1) + 30*(om1*om1)*om2 - 113*om1*(om2*om2) + 333*(om2*om2*om2))*(om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3)))
) * uu[-2] + (
(-2*(om1*om1*om1*om1) + om1*om1*om1*(-4 + om2 + 5*om3) + 2*(om1*om1)*(-1 + om2 + 4*(om2*om2) - 30*om2*om3 + (5 - 34*om3)*om3) + om1*(om2 + om2*om2*(9 + 5*om2) + 5*om3 - 13*om2*(6 + 5*om2)*om3 - 29*(3 + 5*om2)*(om3*om3) - 75*(om3*om3*om3)) + 3*(om2 + om3)*(om2 + om2*om2 - 14*om2*om3 - om3*(11 + 15*om3)))/((om1 + om2)*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*(om1 + om2)*(om1 + 2*om2)*om3 - (14*(om1*om1*om1) + 30*(om1*om1)*om2 - 113*om1*(om2*om2) + 333*(om2*om2*om2))*(om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3)))
) * uu[-3] + (
(2*(om1*om1)*((1 + om1)*(1 + om1)) - 4*om1*((1 + om1)*(1 + om1))*om2 + (24 + 7*om1*(9 + 7*om1))*(om2*om2) + 11*(3 + 5*om1)*(om2*om2*om2))/(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*((om1 + om2)*(om1 + om2)*(om1 + om2))*om3 - 7*(om1 + om2)*(2*(om1*om1*om1) + 6*(om1*om1)*om2 - 14*om1*(om2*om2) + 45*(om2*om2*om2))*(om3*om3) - 11*(4*(om1*om1*om1) + 17*om1*(om2*om2) + 63*(om2*om2*om2))*(om3*om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3*om3))
) * uu[-4] + h * (
(
-((3*(om2*om2)*((om1 + om2)*(om1 + om2))*((1 + om1 + om2)*(1 + om1 + om2))*(-4*om1*(1 + om1) + om2 + 3*om1*om2) - 3*om2*((om1 + om2)*(om1 + om2))*((1 + om1 + om2)*(1 + om1 + om2))*(-4*om1*(1 + om1) + om2 + 3*om1*om2)*om3 + 7*(2*(om1*om1*om1)*((1 + om1)*(1 + om1)*(1 + om1)) + 2*(om1*om1)*((1 + om1)*(1 + om1))*(2 + 5*om1)*om2 - 6*om1*((1 + om1)*(1 + om1))*(om2*om2) + (7 + om1*(16 + om1*(18 + 23*om1)))*(om2*om2*om2) + 19*(1 + om1*(3 + 4*om1))*(om2*om2*om2*om2) + 15*(1 + 3*om1)*(om2*om2*om2*om2*om2))*(om3*om3) + 11*(2*(om1*om1)*((1 + om1)*(1 + om1))*(1 + 2*om1) + 2*om1*((1 + om1)*(1 + om1))*(-1 + 2*om1)*om2 + (5 + om1*(12 + om1*(14 + 17*om1)))*(om2*om2) + 20*(1 + om1*(3 + 4*om1))*(om2*om2*om2) + 21*(1 + 3*om1)*(om2*om2*om2*om2))*(om3*om3*om3) + 15*(2*(om1*om1)*((1 + om1)*(1 + om1)) - 2*om1*((1 + om1)*(1 + om1))*om2 + 5*(1 + om1*(3 + 4*om1))*(om2*om2) + 8*(1 + 3*om1)*(om2*om2*om2))*(om3*om3*om3*om3))/(om1*(om1 + om2)*(om1 + om2 + om3)*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*(om1 + om2)*(om1 + 2*om2)*om3 - (14*(om1*om1*om1) + 30*(om1*om1)*om2 - 113*om1*(om2*om2) + 333*(om2*om2*om2))*(om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3))))
) * ff[-1] + (
(2*(3*om2*((om1 + om2)*(om1 + om2))*((1 + om1 + om2)*(1 + om1 + om2)) - 3*(om1 + om2)*((1 + om1 + om2)*(1 + om1 + om2))*(om1 + 2*om2)*om3 + (-(om1*(1 + om1)*(11 + 18*om1)) + (55 + om1*(116 + 75*om1))*om2 + (145 + 204*om1)*(om2*om2) + 111*(om2*om2*om2))*(om3*om3) - 15*(om1 + om1*om1 - 7*om1*om2 - om2*(5 + 8*om2))*(om3*om3*om3)))/(om1*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*(om1 + om2)*(om1 + 2*om2)*om3 - (14*(om1*om1*om1) + 30*(om1*om1)*om2 - 113*om1*(om2*om2) + 333*(om2*om2*om2))*(om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3)))
) * ff[-2] + (
(3*om2*(-2*(om1*om1*om1*om1) + om1*om1*om1*(-4 + om2 + 5*om3) + 2*(om1*om1)*(-1 + om2 + 4*(om2*om2) - 30*om2*om3 + (5 - 34*om3)*om3) + om1*(om2 + om2*om2*(9 + 5*om2) + 5*om3 - 13*om2*(6 + 5*om2)*om3 - 29*(3 + 5*om2)*(om3*om3) - 75*(om3*om3*om3)) + 3*(om2 + om3)*(om2 + om2*om2 - 14*om2*om3 - om3*(11 + 15*om3))))/((om1 + om2)*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*(om1 + om2)*(om1 + 2*om2)*om3 - (14*(om1*om1*om1) + 30*(om1*om1)*om2 - 113*om1*(om2*om2) + 333*(om2*om2*om2))*(om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3)))
) * ff[-3] + (
(4*(2*(om1*om1)*((1 + om1)*(1 + om1)) - 4*om1*((1 + om1)*(1 + om1))*om2 + (24 + 7*om1*(9 + 7*om1))*(om2*om2) + 11*(3 + 5*om1)*(om2*om2*om2))*om3)/(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*((om1 + om2)*(om1 + om2)*(om1 + om2))*om3 - 7*(om1 + om2)*(2*(om1*om1*om1) + 6*(om1*om1)*om2 - 14*om1*(om2*om2) + 45*(om2*om2*om2))*(om3*om3) - 11*(4*(om1*om1*om1) + 17*om1*(om2*om2) + 63*(om2*om2*om2))*(om3*om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3*om3))
) * ff[-4]
)
return u_new
def adaptive_estimate_eBDF4AS(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
u1int = (
(18*(-2 + np.sqrt(3) - 18*(om1*om1))*(om2*om2*om2*om2*om2*om2*om2) + 2*(om1*om1)*(om3*om3)*(-7*om1*(3*(-7 + 4*np.sqrt(3)) + 4*om1*(2*(-9 + 5*np.sqrt(3)) + 9*om1*(-2 + np.sqrt(3) + om1*om1))) - 22*(-7 + 4*np.sqrt(3) + 4*om1*(-9 + 5*np.sqrt(3) + 6*(-2 + np.sqrt(3))*om1 + 9*(om1*om1*om1)))*om3 - 30*(-9 + 5*np.sqrt(3) + 9*om1*(-2 + np.sqrt(3) + 2*(om1*om1)))*(om3*om3)) + om2*om2*om2*om2*(12*om1*(-7 + 4*np.sqrt(3) + 8*(-9 + 5*np.sqrt(3))*om1 + 60*(-2 + np.sqrt(3))*(om1*om1) + 108*(om1*om1*om1*om1)) + 3*(7 - 4*np.sqrt(3))*om3 + 6*om1*(45 - 25*np.sqrt(3) - 45*(-2 + np.sqrt(3))*om1 + 36*(om1*om1*om1))*om3 + 133*(-9 + 5*np.sqrt(3) + 18*om1*(-2 + np.sqrt(3) - 8*(om1*om1)))*(om3*om3) + 1386*(-2 + np.sqrt(3) - 18*(om1*om1))*(om3*om3*om3)) + 2*om1*om2*om3*(-6*(om1*om1)*(3*(-7 + 4*np.sqrt(3)) + 4*om1*(2*(-9 + 5*np.sqrt(3)) + 9*om1*(-2 + np.sqrt(3) + om1*om1))) - 7*om1*(3*(-7 + 4*np.sqrt(3)) + 4*om1*(4*(-9 + 5*np.sqrt(3)) + 27*(-2 + np.sqrt(3))*om1 + 45*(om1*om1*om1)))*om3 - 22*(7 - 4*np.sqrt(3) + 12*(om1*om1)*(-2 + np.sqrt(3) + 3*(om1*om1)))*(om3*om3) + 30*(-9 + 5*np.sqrt(3) + 9*om1*(-2 + np.sqrt(3) + 2*(om1*om1)))*(om3*om3*om3)) + om2*om2*om2*(3*(om1*om1)*(15*(-7 + 4*np.sqrt(3)) + 56*(-9 + 5*np.sqrt(3))*om1 + 324*(-2 + np.sqrt(3))*(om1*om1) + 468*(om1*om1*om1*om1)) - 12*om1*(-7 + 4*np.sqrt(3) + 8*(-9 + 5*np.sqrt(3))*om1 + 60*(-2 + np.sqrt(3))*(om1*om1) + 108*(om1*om1*om1*om1))*om3 + 7*(7*(-7 + 4*np.sqrt(3)) + 52*(-9 + 5*np.sqrt(3))*om1 + 468*(-2 + np.sqrt(3))*(om1*om1) - 828*(om1*om1*om1*om1))*(om3*om3) + 220*(-9 + 5*np.sqrt(3) + 18*(-2 + np.sqrt(3))*om1 - 144*(om1*om1*om1))*(om3*om3*om3) + 720*(-2 + np.sqrt(3) - 18*(om1*om1))*(om3*om3*om3*om3)) + 6*(om2*om2*om2*om2*om2*om2)*(-9 + 5*np.sqrt(3) - 3*(-2 + np.sqrt(3))*om3 + 18*om1*(-2 + np.sqrt(3) - 8*(om1*om1) + 3*om1*om3)) + 3*(om2*om2*om2*om2*om2)*(-7 + 4*np.sqrt(3) - 72*(om1*om1*om1*om1) + 288*(om1*om1*om1)*om3 + om1*(-90 + 50*np.sqrt(3) - 36*(-2 + np.sqrt(3))*om3) + 2*om3*(9 - 5*np.sqrt(3) + 105*(-2 + np.sqrt(3))*om3) + 90*(om1*om1)*(-2 + np.sqrt(3) - 42*(om3*om3))) + om2*om2*(432*(-2 + np.sqrt(3))*(om1*om1*om1*om1*om1) + 432*(om1*om1*om1*om1*om1*om1*om1) - 1404*(om1*om1*om1*om1*om1*om1)*om3 + 5*(om3*om3*om3)*(-77 + 44*np.sqrt(3) + 15*(-9 + 5*np.sqrt(3))*om3) + 6*om1*(om3*om3)*(-98 + 56*np.sqrt(3) + 66*(-9 + 5*np.sqrt(3))*om3 + 225*(-2 + np.sqrt(3))*(om3*om3)) + 3*(om1*om1)*om3*(105 - 60*np.sqrt(3) + 56*(-9 + 5*np.sqrt(3))*om3 + 1188*(-2 + np.sqrt(3))*(om3*om3)) - 12*(om1*om1*om1*om1)*(72 - 40*np.sqrt(3) + 81*(-2 + np.sqrt(3))*om3 + 561*(om3*om3*om3)) + 12*(om1*om1*om1)*(3*(-7 + 4*np.sqrt(3)) + 14*(9 - 5*np.sqrt(3))*om3 + 42*(-2 + np.sqrt(3))*(om3*om3) - 900*(om3*om3*om3*om3))))/(36.*(om1*om1)*(om1 + om2)*(om1 + om2 + om3)*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*(om1 + om2)*(om1 + 2*om2)*om3 - (14*(om1*om1*om1) + 30*(om1*om1)*om2 - 113*om1*(om2*om2) + 333*(om2*om2*om2))*(om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3)))
) * uu[-1] + (
(-18*(-2 + np.sqrt(3))*(om1*om1*om1*om1)*(om2 - om3) + 6*(om1*om1*om1)*(-12*(-2 + np.sqrt(3))*(om2*om2) + om2*(9 - 5*np.sqrt(3) + 15*(-2 + np.sqrt(3))*om3) + om3*(-9 + 5*np.sqrt(3) + 18*(-2 + np.sqrt(3))*om3)) + om2*(-18*(-2 + np.sqrt(3))*(om2*om2*om2*om2) + 5*(om3*om3)*(77 - 44*np.sqrt(3) + 15*(9 - 5*np.sqrt(3))*om3) + 6*(om2*om2*om2)*(9 - 5*np.sqrt(3) + 6*(-2 + np.sqrt(3))*om3) + om2*om3*(6*(-7 + 4*np.sqrt(3)) + 145*(9 - 5*np.sqrt(3))*om3 - 720*(-2 + np.sqrt(3))*(om3*om3)) - 3*(om2*om2)*(-7 + 4*np.sqrt(3) + 4*(9 - 5*np.sqrt(3))*om3 + 222*(-2 + np.sqrt(3))*(om3*om3))) + om1*om1*(-108*(-2 + np.sqrt(3))*(om2*om2*om2) + 18*(om2*om2)*(9 - 5*np.sqrt(3) + 9*(-2 + np.sqrt(3))*om3) + om3*(3*(-7 + 4*np.sqrt(3)) + 29*(-9 + 5*np.sqrt(3))*om3 + 90*(-2 + np.sqrt(3))*(om3*om3)) - 3*om2*(-7 + 4*np.sqrt(3) + 2*om3*(36 - 20*np.sqrt(3) + 75*(-2 + np.sqrt(3))*om3))) + om1*(-72*(-2 + np.sqrt(3))*(om2*om2*om2*om2) + 18*(om2*om2*om2)*(9 - 5*np.sqrt(3) + 7*(-2 + np.sqrt(3))*om3) + om3*om3*(-77 + 44*np.sqrt(3) + 15*(-9 + 5*np.sqrt(3))*om3) + om2*om3*(9*(-7 + 4*np.sqrt(3)) + 116*(9 - 5*np.sqrt(3))*om3 - 630*(-2 + np.sqrt(3))*(om3*om3)) - 6*(om2*om2)*(-7 + 4*np.sqrt(3) + om3*(45 - 25*np.sqrt(3) + 204*(-2 + np.sqrt(3))*om3))))/(36.*(om1*om1)*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*(om1 + om2)*(om1 + 2*om2)*om3 - (14*(om1*om1*om1) + 30*(om1*om1)*om2 - 113*om1*(om2*om2) + 333*(om2*om2*om2))*(om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3)))
) * uu[-2] + (
(12*(-2 + np.sqrt(3))*(om1*om1*om1*om1) + om1*om1*om1*(-36 + 20*np.sqrt(3) - 6*(-2 + np.sqrt(3))*om2 + 60*om3 - 30*np.sqrt(3)*om3) - 3*(om2 + om3)*((-9 + 5*np.sqrt(3))*(om2*om2) + om2*(-7 + 4*np.sqrt(3) + 14*(9 - 5*np.sqrt(3))*om3) + om3*(77 - 44*np.sqrt(3) + 15*(9 - 5*np.sqrt(3))*om3)) + 2*(om1*om1)*(-7 + 4*np.sqrt(3) - 24*(-2 + np.sqrt(3))*(om2*om2) + om2*(9 - 5*np.sqrt(3) + 180*(-2 + np.sqrt(3))*om3) + om3*(45 - 25*np.sqrt(3) + 204*(-2 + np.sqrt(3))*om3)) + om1*(-30*(-2 + np.sqrt(3))*(om2*om2*om2) + om2*om2*(81 - 45*np.sqrt(3) + 390*(-2 + np.sqrt(3))*om3) + om3*(35 - 20*np.sqrt(3) + 87*(-9 + 5*np.sqrt(3))*om3 + 450*(-2 + np.sqrt(3))*(om3*om3)) + om2*(7 - 4*np.sqrt(3) + 78*(-9 + 5*np.sqrt(3))*om3 + 870*(-2 + np.sqrt(3))*(om3*om3))))/(36.*(om1 + om2)*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*(om1 + om2)*(om1 + 2*om2)*om3 - (14*(om1*om1*om1) + 30*(om1*om1)*om2 - 113*om1*(om2*om2) + 333*(om2*om2*om2))*(om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3)))
) * uu[-3] + (
(-12*(-2 + np.sqrt(3))*(om1*om1*om1*om1) + 3*(om2*om2)*(56 - 32*np.sqrt(3) + (99 - 55*np.sqrt(3))*om2) + 4*(om1*om1*om1)*(9 - 5*np.sqrt(3) + 6*(-2 + np.sqrt(3))*om2) + om1*om2*(4*(-7 + 4*np.sqrt(3)) + 63*(9 - 5*np.sqrt(3))*om2 - 330*(-2 + np.sqrt(3))*(om2*om2)) + om1*om1*(14 - 8*np.sqrt(3) + 8*(-9 + 5*np.sqrt(3))*om2 - 294*(-2 + np.sqrt(3))*(om2*om2)))/(36.*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*((om1 + om2)*(om1 + om2)*(om1 + om2))*om3 - 7*(om1 + om2)*(2*(om1*om1*om1) + 6*(om1*om1)*om2 - 14*om1*(om2*om2) + 45*(om2*om2*om2))*(om3*om3) - 11*(4*(om1*om1*om1) + 17*om1*(om2*om2) + 63*(om2*om2*om2))*(om3*om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3*om3)))
) * uu[-4] + h * (
(
(-12*(-3 + np.sqrt(3))*(om1*om1*om1*om1*om1*om1)*(6*(om2*om2) - 6*om2*om3 - 7*(om3*om3)) + 6*(om1*om1*om1*om1*om1)*(-39*(-3 + np.sqrt(3))*(om2*om2*om2) + 42*(-2 + np.sqrt(3))*(om3*om3) + 44*(-3 + np.sqrt(3))*(om3*om3*om3) + 3*(om2*om2)*(-12*(-2 + np.sqrt(3)) + 13*(-3 + np.sqrt(3))*om3) + 2*om2*om3*(18*(-2 + np.sqrt(3)) + 35*(-3 + np.sqrt(3))*om3)) + 6*(om1*om1*om1*om1)*(-36*(-3 + np.sqrt(3))*(om2*om2*om2*om2) + 9*(om2*om2*om2)*(-11*(-2 + np.sqrt(3)) + 4*(-3 + np.sqrt(3))*om3) + 3*(om2*om2)*(18 - 10*np.sqrt(3) + 33*(-2 + np.sqrt(3))*om3) + 2*om2*om3*(3*(-9 + 5*np.sqrt(3)) + 84*(-2 + np.sqrt(3))*om3 + 22*(-3 + np.sqrt(3))*(om3*om3)) + om3*om3*(7*(-9 + 5*np.sqrt(3)) + 110*(-2 + np.sqrt(3))*om3 + 30*(-3 + np.sqrt(3))*(om3*om3))) + om2*om2*(om2 + om3)*(18*(-2 + np.sqrt(3))*(om2*om2*om2*om2) + 6*(om2*om2*om2)*(-9 + 5*np.sqrt(3) - 6*(-2 + np.sqrt(3))*om3) + 5*(om3*om3)*(-77 + 44*np.sqrt(3) + 15*(-9 + 5*np.sqrt(3))*om3) + 3*(om2*om2)*(-7 + 4*np.sqrt(3) + 4*(9 - 5*np.sqrt(3))*om3 + 222*(-2 + np.sqrt(3))*(om3*om3)) + om2*om3*(42 - 24*np.sqrt(3) + 145*(-9 + 5*np.sqrt(3))*om3 + 720*(-2 + np.sqrt(3))*(om3*om3))) + om1*om1*om1*(36*(-3 + np.sqrt(3))*(om2*om2*om2*om2*om2) - 36*(om2*om2*om2*om2)*(13*(-2 + np.sqrt(3)) + (-3 + np.sqrt(3))*om3) + 3*(om2*om2*om2)*(27*(9 - 5*np.sqrt(3)) + 156*(-2 + np.sqrt(3))*om3 + 322*(-3 + np.sqrt(3))*(om3*om3)) + 6*om2*om3*(-14 + 8*np.sqrt(3) + 21*(-9 + 5*np.sqrt(3))*om3 + 66*(-2 + np.sqrt(3))*(om3*om3) - 30*(-3 + np.sqrt(3))*(om3*om3*om3)) + 3*(om2*om2)*(28 - 16*np.sqrt(3) + 27*(-9 + 5*np.sqrt(3))*om3 - 84*(-2 + np.sqrt(3))*(om3*om3) + 374*(-3 + np.sqrt(3))*(om3*om3*om3)) + 2*(om3*om3)*(7*(-7 + 4*np.sqrt(3)) + 4*om3*(-99 + 55*np.sqrt(3) + 45*(-2 + np.sqrt(3))*om3))) + om1*om1*(144*(-3 + np.sqrt(3))*(om2*om2*om2*om2*om2*om2) - 144*(-3 + np.sqrt(3))*(om2*om2*om2*om2*om2)*om3 + 2*(om3*om3*om3)*(-77 + 44*np.sqrt(3) + 15*(-9 + 5*np.sqrt(3))*om3) + 24*(om2*om2*om2*om2)*(18 - 10*np.sqrt(3) + 133*(-3 + np.sqrt(3))*(om3*om3)) + 4*om2*(om3*om3)*(7*(-7 + 4*np.sqrt(3)) - 90*(-2 + np.sqrt(3))*(om3*om3)) + 3*(om2*om2)*om3*(7*(-7 + 4*np.sqrt(3)) + 4*om3*(63 - 35*np.sqrt(3) + 77*(-2 + np.sqrt(3))*om3 + 150*(-3 + np.sqrt(3))*(om3*om3))) + 3*(om2*om2*om2)*(49 - 28*np.sqrt(3) + 4*om3*(4*(-9 + 5*np.sqrt(3)) + 63*(-2 + np.sqrt(3))*om3 + 440*(-3 + np.sqrt(3))*(om3*om3)))) + om1*om2*(54*(-3 + np.sqrt(3))*(om2*om2*om2*om2*om2*om2) + 2*(om3*om3*om3)*(77 - 44*np.sqrt(3) + 15*(9 - 5*np.sqrt(3))*om3) - 54*(om2*om2*om2*om2*om2)*(4 - 2*np.sqrt(3) + (-3 + np.sqrt(3))*om3) + 6*om2*(om3*om3)*(49 - 28*np.sqrt(3) + 22*(-9 + 5*np.sqrt(3))*om3 + 225*(-2 + np.sqrt(3))*(om3*om3)) + 3*(om2*om2*om2*om2)*(-9 + 5*np.sqrt(3) + 18*om3*(4 - 2*np.sqrt(3) + 35*(-3 + np.sqrt(3))*om3)) + 2*(om2*om2)*om3*(3*(-7 + 4*np.sqrt(3)) + 4*om3*(14*(-9 + 5*np.sqrt(3)) + 45*om3*(11*(-2 + np.sqrt(3)) + 6*(-3 + np.sqrt(3))*om3))) + 3*(om2*om2*om2)*(14 - 8*np.sqrt(3) + om3*(9 - 5*np.sqrt(3) + 42*om3*(19*(-2 + np.sqrt(3)) + 33*(-3 + np.sqrt(3))*om3)))))/(36.*om1*(om1 + om2)*(om1 + om2 + om3)*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*(om1 + om2)*(om1 + 2*om2)*om3 - (14*(om1*om1*om1) + 30*(om1*om1)*om2 - 113*om1*(om2*om2) + 333*(om2*om2*om2))*(om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3)))
) * ff[-1] + (
(-18*(-2 + np.sqrt(3))*(om1*om1*om1*om1)*(om2 - om3) + 6*(om1*om1*om1)*(-12*(-2 + np.sqrt(3))*(om2*om2) + om2*(9 - 5*np.sqrt(3) + 15*(-2 + np.sqrt(3))*om3) + om3*(-9 + 5*np.sqrt(3) + 18*(-2 + np.sqrt(3))*om3)) + om2*(-18*(-2 + np.sqrt(3))*(om2*om2*om2*om2) + 5*(om3*om3)*(77 - 44*np.sqrt(3) + 15*(9 - 5*np.sqrt(3))*om3) + 6*(om2*om2*om2)*(9 - 5*np.sqrt(3) + 6*(-2 + np.sqrt(3))*om3) + om2*om3*(6*(-7 + 4*np.sqrt(3)) + 145*(9 - 5*np.sqrt(3))*om3 - 720*(-2 + np.sqrt(3))*(om3*om3)) - 3*(om2*om2)*(-7 + 4*np.sqrt(3) + 4*(9 - 5*np.sqrt(3))*om3 + 222*(-2 + np.sqrt(3))*(om3*om3))) + om1*om1*(-108*(-2 + np.sqrt(3))*(om2*om2*om2) + 18*(om2*om2)*(9 - 5*np.sqrt(3) + 9*(-2 + np.sqrt(3))*om3) + om3*(3*(-7 + 4*np.sqrt(3)) + 29*(-9 + 5*np.sqrt(3))*om3 + 90*(-2 + np.sqrt(3))*(om3*om3)) - 3*om2*(-7 + 4*np.sqrt(3) + 2*om3*(36 - 20*np.sqrt(3) + 75*(-2 + np.sqrt(3))*om3))) + om1*(-72*(-2 + np.sqrt(3))*(om2*om2*om2*om2) + 18*(om2*om2*om2)*(9 - 5*np.sqrt(3) + 7*(-2 + np.sqrt(3))*om3) + om3*om3*(-77 + 44*np.sqrt(3) + 15*(-9 + 5*np.sqrt(3))*om3) + om2*om3*(9*(-7 + 4*np.sqrt(3)) + 116*(9 - 5*np.sqrt(3))*om3 - 630*(-2 + np.sqrt(3))*(om3*om3)) - 6*(om2*om2)*(-7 + 4*np.sqrt(3) + om3*(45 - 25*np.sqrt(3) + 204*(-2 + np.sqrt(3))*om3))))/(18.*om1*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*(om1 + om2)*(om1 + 2*om2)*om3 - (14*(om1*om1*om1) + 30*(om1*om1)*om2 - 113*om1*(om2*om2) + 333*(om2*om2*om2))*(om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3)))
) * ff[-2] + (
(om2*(-12*(-2 + np.sqrt(3))*(om1*om1*om1*om1) + 2*(om1*om1*om1)*(18 - 10*np.sqrt(3) + 3*(-2 + np.sqrt(3))*om2 + 15*(-2 + np.sqrt(3))*om3) + 3*(om2 + om3)*((-9 + 5*np.sqrt(3))*(om2*om2) + om2*(-7 + 4*np.sqrt(3) + 14*(9 - 5*np.sqrt(3))*om3) + om3*(77 - 44*np.sqrt(3) + 15*(9 - 5*np.sqrt(3))*om3)) + 2*(om1*om1)*(7 - 4*np.sqrt(3) + 24*(-2 + np.sqrt(3))*(om2*om2) + 5*(-9 + 5*np.sqrt(3))*om3 - 204*(-2 + np.sqrt(3))*(om3*om3) + om2*(-9 + 5*np.sqrt(3) - 180*(-2 + np.sqrt(3))*om3)) + om1*(30*(-2 + np.sqrt(3))*(om2*om2*om2) + om2*om2*(9*(-9 + 5*np.sqrt(3)) - 390*(-2 + np.sqrt(3))*om3) + om2*(-7 + 4*np.sqrt(3) + 78*(9 - 5*np.sqrt(3))*om3 - 870*(-2 + np.sqrt(3))*(om3*om3)) + om3*(5*(-7 + 4*np.sqrt(3)) + 87*(9 - 5*np.sqrt(3))*om3 - 450*(-2 + np.sqrt(3))*(om3*om3)))))/(12.*(om1 + om2)*(-3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)) + 3*(4*om1 - 3*om2)*om2*(om1 + om2)*(om1 + 2*om2)*om3 + (14*(om1*om1*om1) + 30*(om1*om1)*om2 - 113*om1*(om2*om2) + 333*(om2*om2*om2))*(om3*om3) + 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3)))
) * ff[-3] + (
((-12*(-2 + np.sqrt(3))*(om1*om1*om1*om1) + 3*(om2*om2)*(56 - 32*np.sqrt(3) + (99 - 55*np.sqrt(3))*om2) + 4*(om1*om1*om1)*(9 - 5*np.sqrt(3) + 6*(-2 + np.sqrt(3))*om2) + om1*om2*(4*(-7 + 4*np.sqrt(3)) + 63*(9 - 5*np.sqrt(3))*om2 - 330*(-2 + np.sqrt(3))*(om2*om2)) + om1*om1*(14 - 8*np.sqrt(3) + 8*(-9 + 5*np.sqrt(3))*om2 - 294*(-2 + np.sqrt(3))*(om2*om2)))*om3)/(9.*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*((om1 + om2)*(om1 + om2)*(om1 + om2))*om3 - 7*(om1 + om2)*(2*(om1*om1*om1) + 6*(om1*om1)*om2 - 14*om1*(om2*om2) + 45*(om2*om2*om2))*(om3*om3) - 11*(4*(om1*om1*om1) + 17*om1*(om2*om2) + 63*(om2*om2*om2))*(om3*om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3*om3)))
) * ff[-4]
)
u2int = (
(-18*(2 + np.sqrt(3) + 18*(om1*om1))*(om2*om2*om2*om2*om2*om2*om2) - 3*(om2*om2*om2*om2*om2)*(7 + 4*np.sqrt(3) + 2*om1*(5*(9 + 5*np.sqrt(3)) + 45*(2 + np.sqrt(3))*om1 + 36*(om1*om1*om1)) - 2*(9 + 5*np.sqrt(3))*om3 - 36*om1*(2 + np.sqrt(3) + 8*(om1*om1))*om3 + 210*(2 + np.sqrt(3) + 18*(om1*om1))*(om3*om3)) + 2*(om1*om1)*(om3*om3)*(7*om1*(3*(7 + 4*np.sqrt(3)) + 4*om1*(2*(9 + 5*np.sqrt(3)) + 9*(2 + np.sqrt(3))*om1 - 9*(om1*om1*om1))) + 22*(7 + 4*np.sqrt(3) + 4*om1*(9 + 5*np.sqrt(3) + 6*(2 + np.sqrt(3))*om1 - 9*(om1*om1*om1)))*om3 + 30*(9 + 5*np.sqrt(3) + 9*om1*(2 + np.sqrt(3) - 2*(om1*om1)))*(om3*om3)) + om2*om2*om2*om2*(12*om1*(-7 - 4*np.sqrt(3) - 8*(9 + 5*np.sqrt(3))*om1 - 60*(2 + np.sqrt(3))*(om1*om1) + 108*(om1*om1*om1*om1)) + 3*(7 + 4*np.sqrt(3) + 10*(9 + 5*np.sqrt(3))*om1 + 90*(2 + np.sqrt(3))*(om1*om1) + 72*(om1*om1*om1*om1))*om3 - 133*(9 + 5*np.sqrt(3) + 18*om1*(2 + np.sqrt(3) + 8*(om1*om1)))*(om3*om3) - 1386*(2 + np.sqrt(3) + 18*(om1*om1))*(om3*om3*om3)) + om2*om2*om2*(3*(om1*om1)*(-15*(7 + 4*np.sqrt(3)) - 56*(9 + 5*np.sqrt(3))*om1 - 324*(2 + np.sqrt(3))*(om1*om1) + 468*(om1*om1*om1*om1)) + 12*om1*(7 + 4*np.sqrt(3) + 8*(9 + 5*np.sqrt(3))*om1 + 60*(2 + np.sqrt(3))*(om1*om1) - 108*(om1*om1*om1*om1))*om3 - 7*(7*(7 + 4*np.sqrt(3)) + 52*(9 + 5*np.sqrt(3))*om1 + 468*(2 + np.sqrt(3))*(om1*om1) + 828*(om1*om1*om1*om1))*(om3*om3) - 220*(9 + 5*np.sqrt(3) + 18*om1*(2 + np.sqrt(3) + 8*(om1*om1)))*(om3*om3*om3) - 720*(2 + np.sqrt(3) + 18*(om1*om1))*(om3*om3*om3*om3)) - 6*(om2*om2*om2*om2*om2*om2)*(9 + 5*np.sqrt(3) - 3*(2 + np.sqrt(3))*om3 + 18*om1*(2 + np.sqrt(3) + 8*(om1*om1) - 3*om1*om3)) + 2*om1*om2*om3*(-216*(om1*om1*om1*om1*om1*om1) - 1260*(om1*om1*om1*om1*om1)*om3 - 2*(om3*om3)*(77 + 44*np.sqrt(3) + 15*(9 + 5*np.sqrt(3))*om3) + 72*(om1*om1*om1*om1)*(6 + 3*np.sqrt(3) - 11*(om3*om3)) + 3*om1*om3*(7*(7 + 4*np.sqrt(3)) - 90*(2 + np.sqrt(3))*(om3*om3)) + 2*(om1*om1)*(9*(7 + 4*np.sqrt(3)) + 56*(9 + 5*np.sqrt(3))*om3 + 132*(2 + np.sqrt(3))*(om3*om3)) + 12*(om1*om1*om1)*(4*(9 + 5*np.sqrt(3)) + 63*(2 + np.sqrt(3))*om3 + 45*(om3*om3*om3))) + om2*om2*(-432*(2 + np.sqrt(3))*(om1*om1*om1*om1*om1) + 432*(om1*om1*om1*om1*om1*om1*om1) - 1404*(om1*om1*om1*om1*om1*om1)*om3 - 5*(om3*om3*om3)*(77 + 44*np.sqrt(3) + 15*(9 + 5*np.sqrt(3))*om3) + 3*(om1*om1)*om3*(15*(7 + 4*np.sqrt(3)) - 56*(9 + 5*np.sqrt(3))*om3 - 1188*(2 + np.sqrt(3))*(om3*om3)) - 6*om1*(om3*om3)*(98 + 56*np.sqrt(3) + 66*(9 + 5*np.sqrt(3))*om3 + 225*(2 + np.sqrt(3))*(om3*om3)) + 12*(om1*om1*om1*om1)*(-8*(9 + 5*np.sqrt(3)) + 81*(2 + np.sqrt(3))*om3 - 561*(om3*om3*om3)) - 12*(om1*om1*om1)*(3*(7 + 4*np.sqrt(3)) - 14*(9 + 5*np.sqrt(3))*om3 + 42*(2 + np.sqrt(3))*(om3*om3) + 900*(om3*om3*om3*om3))))/(36.*(om1*om1)*(om1 + om2)*(om1 + om2 + om3)*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*(om1 + om2)*(om1 + 2*om2)*om3 - (14*(om1*om1*om1) + 30*(om1*om1)*om2 - 113*om1*(om2*om2) + 333*(om2*om2*om2))*(om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3)))
) * uu[-1] + (
(18*(2 + np.sqrt(3))*(om2*om2*om2*om2*om2) + 6*(om2*om2*om2*om2)*(9 + 5*np.sqrt(3) + 12*(2 + np.sqrt(3))*om1 - 6*(2 + np.sqrt(3))*om3) + om1*om3*(-3*om1*(7 + 4*np.sqrt(3) + 2*om1*(9 + 5*np.sqrt(3) + 3*(2 + np.sqrt(3))*om1)) + (-11*(7 + 4*np.sqrt(3)) - 29*(9 + 5*np.sqrt(3))*om1 - 108*(2 + np.sqrt(3))*(om1*om1))*om3 - 15*(9 + 5*np.sqrt(3) + 6*(2 + np.sqrt(3))*om1)*(om3*om3)) + 3*(om2*om2*om2)*(7 + 4*np.sqrt(3) + 36*(2 + np.sqrt(3))*(om1*om1) - 4*(9 + 5*np.sqrt(3))*om3 + 222*(2 + np.sqrt(3))*(om3*om3) + 6*om1*(9 + 5*np.sqrt(3) - 7*(2 + np.sqrt(3))*om3)) + om2*(18*(2 + np.sqrt(3))*(om1*om1*om1*om1) + 6*(om1*om1*om1)*(9 + 5*np.sqrt(3) - 15*(2 + np.sqrt(3))*om3) + 5*(om3*om3)*(77 + 44*np.sqrt(3) + 15*(9 + 5*np.sqrt(3))*om3) + 3*(om1*om1)*(7 + 4*np.sqrt(3) - 8*(9 + 5*np.sqrt(3))*om3 + 150*(2 + np.sqrt(3))*(om3*om3)) + om1*om3*(-9*(7 + 4*np.sqrt(3)) + 116*(9 + 5*np.sqrt(3))*om3 + 630*(2 + np.sqrt(3))*(om3*om3))) + om2*om2*(72*(2 + np.sqrt(3))*(om1*om1*om1) - 18*(om1*om1)*(-9 - 5*np.sqrt(3) + 9*(2 + np.sqrt(3))*om3) + 6*om1*(7 + 4*np.sqrt(3) - 5*(9 + 5*np.sqrt(3))*om3 + 204*(2 + np.sqrt(3))*(om3*om3)) + om3*(-6*(7 + 4*np.sqrt(3)) + 145*(9 + 5*np.sqrt(3))*om3 + 720*(2 + np.sqrt(3))*(om3*om3))))/(36.*(om1*om1)*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*(om1 + om2)*(om1 + 2*om2)*om3 - (14*(om1*om1*om1) + 30*(om1*om1)*om2 - 113*om1*(om2*om2) + 333*(om2*om2*om2))*(om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3)))
) * uu[-2] + (
(-12*(2 + np.sqrt(3))*(om1*om1*om1*om1) + 2*(om1*om1*om1)*(-2*(9 + 5*np.sqrt(3)) + 3*(2 + np.sqrt(3))*om2 + 15*(2 + np.sqrt(3))*om3) + 2*(om1*om1)*(-7 - 4*np.sqrt(3) + 24*(2 + np.sqrt(3))*(om2*om2) + 5*(9 + 5*np.sqrt(3))*om3 - 204*(2 + np.sqrt(3))*(om3*om3) + om2*(9 + 5*np.sqrt(3) - 180*(2 + np.sqrt(3))*om3)) + 3*(om2 + om3)*((9 + 5*np.sqrt(3))*(om2*om2) + om2*(7 + 4*np.sqrt(3) - 14*(9 + 5*np.sqrt(3))*om3) - om3*(77 + 44*np.sqrt(3) + 15*(9 + 5*np.sqrt(3))*om3)) + om1*(30*(2 + np.sqrt(3))*(om2*om2*om2) + om2*om2*(9*(9 + 5*np.sqrt(3)) - 390*(2 + np.sqrt(3))*om3) + om3*(5*(7 + 4*np.sqrt(3)) - 87*(9 + 5*np.sqrt(3))*om3 - 450*(2 + np.sqrt(3))*(om3*om3)) - om2*(-7 - 4*np.sqrt(3) + 78*(9 + 5*np.sqrt(3))*om3 + 870*(2 + np.sqrt(3))*(om3*om3))))/(36.*(om1 + om2)*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*(om1 + om2)*(om1 + 2*om2)*om3 - (14*(om1*om1*om1) + 30*(om1*om1)*om2 - 113*om1*(om2*om2) + 333*(om2*om2*om2))*(om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3)))
) * uu[-3] + (
(12*(2 + np.sqrt(3))*(om1*om1*om1*om1) + 3*(om2*om2)*(56 + 32*np.sqrt(3) + 99*om2 + 55*np.sqrt(3)*om2) + 4*(om1*om1*om1)*(9 + 5*np.sqrt(3) - 6*(2 + np.sqrt(3))*om2) + 2*(om1*om1)*(7 + 4*np.sqrt(3) - 4*(9 + 5*np.sqrt(3))*om2 + 147*(2 + np.sqrt(3))*(om2*om2)) + om1*om2*(-4*(7 + 4*np.sqrt(3)) + 63*(9 + 5*np.sqrt(3))*om2 + 330*(2 + np.sqrt(3))*(om2*om2)))/(36.*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*((om1 + om2)*(om1 + om2)*(om1 + om2))*om3 - 7*(om1 + om2)*(2*(om1*om1*om1) + 6*(om1*om1)*om2 - 14*om1*(om2*om2) + 45*(om2*om2*om2))*(om3*om3) - 11*(4*(om1*om1*om1) + 17*om1*(om2*om2) + 63*(om2*om2*om2))*(om3*om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3*om3)))
) * uu[-4] + h * (
(
-(-12*(3 + np.sqrt(3))*(om1*om1*om1*om1*om1*om1)*(6*(om2*om2) - 6*om2*om3 - 7*(om3*om3)) - 6*(om1*om1*om1*om1*om1)*(39*(3 + np.sqrt(3))*(om2*om2*om2) + om2*om2*(36*(2 + np.sqrt(3)) - 39*(3 + np.sqrt(3))*om3) - 2*(om3*om3)*(21*(2 + np.sqrt(3)) + 22*(3 + np.sqrt(3))*om3) - 2*om2*om3*(18*(2 + np.sqrt(3)) + 35*(3 + np.sqrt(3))*om3)) + om2*om2*(om2 + om3)*(18*(2 + np.sqrt(3))*(om2*om2*om2*om2) + 6*(om2*om2*om2)*(9 + 5*np.sqrt(3) - 6*(2 + np.sqrt(3))*om3) + 5*(om3*om3)*(77 + 44*np.sqrt(3) + 15*(9 + 5*np.sqrt(3))*om3) + 3*(om2*om2)*(7 + 4*np.sqrt(3) - 4*(9 + 5*np.sqrt(3))*om3 + 222*(2 + np.sqrt(3))*(om3*om3)) + om2*om3*(-6*(7 + 4*np.sqrt(3)) + 145*(9 + 5*np.sqrt(3))*om3 + 720*(2 + np.sqrt(3))*(om3*om3))) - 6*(om1*om1*om1*om1)*(36*(3 + np.sqrt(3))*(om2*om2*om2*om2) + om2*om2*(6*(9 + 5*np.sqrt(3)) - 99*(2 + np.sqrt(3))*om3) - 9*(om2*om2*om2)*(-11*(2 + np.sqrt(3)) + 4*(3 + np.sqrt(3))*om3) - 2*om2*om3*(3*(9 + 5*np.sqrt(3)) + 84*(2 + np.sqrt(3))*om3 + 22*(3 + np.sqrt(3))*(om3*om3)) - om3*om3*(7*(9 + 5*np.sqrt(3)) + 110*(2 + np.sqrt(3))*om3 + 30*(3 + np.sqrt(3))*(om3*om3))) + om1*om1*om1*(36*(3 + np.sqrt(3))*(om2*om2*om2*om2*om2) - 36*(om2*om2*om2*om2)*(13*(2 + np.sqrt(3)) + (3 + np.sqrt(3))*om3) + 3*(om2*om2*om2)*(-27*(9 + 5*np.sqrt(3)) + 156*(2 + np.sqrt(3))*om3 + 322*(3 + np.sqrt(3))*(om3*om3)) + 6*om2*om3*(14 + 8*np.sqrt(3) + 21*(9 + 5*np.sqrt(3))*om3 + 66*(2 + np.sqrt(3))*(om3*om3) - 30*(3 + np.sqrt(3))*(om3*om3*om3)) + 3*(om2*om2)*(-4*(7 + 4*np.sqrt(3)) + 27*(9 + 5*np.sqrt(3))*om3 - 84*(2 + np.sqrt(3))*(om3*om3) + 374*(3 + np.sqrt(3))*(om3*om3*om3)) + 2*(om3*om3)*(7*(7 + 4*np.sqrt(3)) + 4*om3*(99 + 55*np.sqrt(3) + 45*(2 + np.sqrt(3))*om3))) + om1*om1*(144*(3 + np.sqrt(3))*(om2*om2*om2*om2*om2*om2) - 144*(3 + np.sqrt(3))*(om2*om2*om2*om2*om2)*om3 + 2*(om3*om3*om3)*(77 + 44*np.sqrt(3) + 15*(9 + 5*np.sqrt(3))*om3) + 4*om2*(om3*om3)*(7*(7 + 4*np.sqrt(3)) - 90*(2 + np.sqrt(3))*(om3*om3)) + 24*(om2*om2*om2*om2)*(-2*(9 + 5*np.sqrt(3)) + 133*(3 + np.sqrt(3))*(om3*om3)) + 3*(om2*om2)*om3*(7*(7 + 4*np.sqrt(3)) + 4*om3*(-7*(9 + 5*np.sqrt(3)) + 77*(2 + np.sqrt(3))*om3 + 150*(3 + np.sqrt(3))*(om3*om3))) + 3*(om2*om2*om2)*(-7*(7 + 4*np.sqrt(3)) + 4*om3*(4*(9 + 5*np.sqrt(3)) + 63*(2 + np.sqrt(3))*om3 + 440*(3 + np.sqrt(3))*(om3*om3)))) + om1*om2*(54*(3 + np.sqrt(3))*(om2*om2*om2*om2*om2*om2) - 54*(om2*om2*om2*om2*om2)*(-2*(2 + np.sqrt(3)) + (3 + np.sqrt(3))*om3) - 2*(om3*om3*om3)*(77 + 44*np.sqrt(3) + 15*(9 + 5*np.sqrt(3))*om3) + 6*om2*(om3*om3)*(-7*(7 + 4*np.sqrt(3)) + 22*(9 + 5*np.sqrt(3))*om3 + 225*(2 + np.sqrt(3))*(om3*om3)) + 3*(om2*om2*om2*om2)*(9 + 5*np.sqrt(3) - 36*(2 + np.sqrt(3))*om3 + 630*(3 + np.sqrt(3))*(om3*om3)) + 2*(om2*om2)*om3*(3*(7 + 4*np.sqrt(3)) + 4*om3*(14*(9 + 5*np.sqrt(3)) + 45*om3*(11*(2 + np.sqrt(3)) + 6*(3 + np.sqrt(3))*om3))) + 3*(om2*om2*om2)*(-2*(7 + 4*np.sqrt(3)) + om3*(-9 - 5*np.sqrt(3) + 42*om3*(19*(2 + np.sqrt(3)) + 33*(3 + np.sqrt(3))*om3)))))/(36.*om1*(om1 + om2)*(om1 + om2 + om3)*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*(om1 + om2)*(om1 + 2*om2)*om3 - (14*(om1*om1*om1) + 30*(om1*om1)*om2 - 113*om1*(om2*om2) + 333*(om2*om2*om2))*(om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3)))
) * ff[-1] + (
(18*(2 + np.sqrt(3))*(om2*om2*om2*om2*om2) + 6*(om2*om2*om2*om2)*(9 + 5*np.sqrt(3) + 12*(2 + np.sqrt(3))*om1 - 6*(2 + np.sqrt(3))*om3) + om1*om3*(-3*om1*(7 + 4*np.sqrt(3) + 2*om1*(9 + 5*np.sqrt(3) + 3*(2 + np.sqrt(3))*om1)) + (-11*(7 + 4*np.sqrt(3)) - 29*(9 + 5*np.sqrt(3))*om1 - 108*(2 + np.sqrt(3))*(om1*om1))*om3 - 15*(9 + 5*np.sqrt(3) + 6*(2 + np.sqrt(3))*om1)*(om3*om3)) + 3*(om2*om2*om2)*(7 + 4*np.sqrt(3) + 36*(2 + np.sqrt(3))*(om1*om1) - 4*(9 + 5*np.sqrt(3))*om3 + 222*(2 + np.sqrt(3))*(om3*om3) + 6*om1*(9 + 5*np.sqrt(3) - 7*(2 + np.sqrt(3))*om3)) + om2*(18*(2 + np.sqrt(3))*(om1*om1*om1*om1) + 6*(om1*om1*om1)*(9 + 5*np.sqrt(3) - 15*(2 + np.sqrt(3))*om3) + 5*(om3*om3)*(77 + 44*np.sqrt(3) + 15*(9 + 5*np.sqrt(3))*om3) + 3*(om1*om1)*(7 + 4*np.sqrt(3) - 8*(9 + 5*np.sqrt(3))*om3 + 150*(2 + np.sqrt(3))*(om3*om3)) + om1*om3*(-9*(7 + 4*np.sqrt(3)) + 116*(9 + 5*np.sqrt(3))*om3 + 630*(2 + np.sqrt(3))*(om3*om3))) + om2*om2*(72*(2 + np.sqrt(3))*(om1*om1*om1) - 18*(om1*om1)*(-9 - 5*np.sqrt(3) + 9*(2 + np.sqrt(3))*om3) + 6*om1*(7 + 4*np.sqrt(3) - 5*(9 + 5*np.sqrt(3))*om3 + 204*(2 + np.sqrt(3))*(om3*om3)) + om3*(-6*(7 + 4*np.sqrt(3)) + 145*(9 + 5*np.sqrt(3))*om3 + 720*(2 + np.sqrt(3))*(om3*om3))))/(18.*om1*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*(om1 + om2)*(om1 + 2*om2)*om3 - (14*(om1*om1*om1) + 30*(om1*om1)*om2 - 113*om1*(om2*om2) + 333*(om2*om2*om2))*(om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3)))
) * ff[-2] + (
(om2*(12*(2 + np.sqrt(3))*(om1*om1*om1*om1) - 2*(om1*om1*om1)*(-2*(9 + 5*np.sqrt(3)) + 3*(2 + np.sqrt(3))*om2 + 15*(2 + np.sqrt(3))*om3) + 2*(om1*om1)*(7 + 4*np.sqrt(3) - 24*(2 + np.sqrt(3))*(om2*om2) - 5*(9 + 5*np.sqrt(3))*om3 + 204*(2 + np.sqrt(3))*(om3*om3) + om2*(-9 - 5*np.sqrt(3) + 180*(2 + np.sqrt(3))*om3)) - 3*(om2 + om3)*((9 + 5*np.sqrt(3))*(om2*om2) + om2*(7 + 4*np.sqrt(3) - 14*(9 + 5*np.sqrt(3))*om3) - om3*(77 + 44*np.sqrt(3) + 15*(9 + 5*np.sqrt(3))*om3)) + om1*(-30*(2 + np.sqrt(3))*(om2*om2*om2) + om2*om2*(-9*(9 + 5*np.sqrt(3)) + 390*(2 + np.sqrt(3))*om3) + om3*(-5*(7 + 4*np.sqrt(3)) + 87*(9 + 5*np.sqrt(3))*om3 + 450*(2 + np.sqrt(3))*(om3*om3)) + om2*(-7 - 4*np.sqrt(3) + 78*(9 + 5*np.sqrt(3))*om3 + 870*(2 + np.sqrt(3))*(om3*om3)))))/(12.*(om1 + om2)*(-3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)) + 3*(4*om1 - 3*om2)*om2*(om1 + om2)*(om1 + 2*om2)*om3 + (14*(om1*om1*om1) + 30*(om1*om1)*om2 - 113*om1*(om2*om2) + 333*(om2*om2*om2))*(om3*om3) + 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3)))
) * ff[-3] + (
((12*(2 + np.sqrt(3))*(om1*om1*om1*om1) + 3*(om2*om2)*(56 + 32*np.sqrt(3) + 99*om2 + 55*np.sqrt(3)*om2) + 4*(om1*om1*om1)*(9 + 5*np.sqrt(3) - 6*(2 + np.sqrt(3))*om2) + 2*(om1*om1)*(7 + 4*np.sqrt(3) - 4*(9 + 5*np.sqrt(3))*om2 + 147*(2 + np.sqrt(3))*(om2*om2)) + om1*om2*(-4*(7 + 4*np.sqrt(3)) + 63*(9 + 5*np.sqrt(3))*om2 + 330*(2 + np.sqrt(3))*(om2*om2)))*om3)/(9.*(3*(4*om1 - 3*om2)*(om2*om2)*((om1 + om2)*(om1 + om2)*(om1 + om2)) - 3*(4*om1 - 3*om2)*om2*((om1 + om2)*(om1 + om2)*(om1 + om2))*om3 - 7*(om1 + om2)*(2*(om1*om1*om1) + 6*(om1*om1)*om2 - 14*om1*(om2*om2) + 45*(om2*om2*om2))*(om3*om3) - 11*(4*(om1*om1*om1) + 17*om1*(om2*om2) + 63*(om2*om2*om2))*(om3*om3*om3) - 30*(om1*om1 - 2*om1*om2 + 12*(om2*om2))*(om3*om3*om3*om3)))
) * ff[-4]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def conservative_eBDF4AS(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_eBDF4AS, adaptive_step_eBDF4AS,
**kwargs)
def cons_or_diss_eBDF4AS(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_eBDF4AS, adaptive_step_eBDF4AS,
fixed_estimate_eBDF4AS, adaptive_estimate_eBDF4AS,
**kwargs)
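# The Arévalo & Söderlind variants should likewise collapse to their fixed
# step size coefficients for unit step size ratios (a demo sketch):
def _check_eBDFAS_reductions():
    _check_reduces_to_fixed(fixed_step_eBDF2AS, adaptive_step_eBDF2AS, 2)
    _check_reduces_to_fixed(fixed_step_eBDF3AS, adaptive_step_eBDF3AS, 3)
    _check_reduces_to_fixed(fixed_step_eBDF4AS, adaptive_step_eBDF4AS, 4)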
# explicit difference correction (EDC) methods of Arévalo, Claus & Söderlind (2000), in the variable step size formulation of Arévalo & Söderlind (2017)
def fixed_step_EDC22(uu, ff, h):
u_new = (
1.3333333333333333
) * uu[-1] + (
-0.3333333333333333
) * uu[-2] + h * (
(
1.7777777777777777
) * ff[-1] + (
-1.5555555555555556
) * ff[-2] + (
0.4444444444444444
) * ff[-3]
)
return u_new
def fixed_estimate_EDC22(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
u1int = (
1.0119509986711015
) * uu[-1] + (
-0.011950998671101448
) * uu[-2] + h * (
(
0.24141182965239458
) * ff[-1] + (
-0.055771327131806755
) * ff[-2] + (
0.013733364213497898
) * ff[-3]
)
u2int = (
1.196382334662232
) * uu[-1] + (
-0.19638233466223187
) * uu[-2] + h * (
(
1.255115948125383
) * ff[-1] + (
-0.9164508950904154
) * ff[-2] + (
0.2536277468976132
) * ff[-3]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def adaptive_step_EDC22(uu, ff, h, old_omega):
om2 = old_omega[-2]
om1 = old_omega[-1]
u_new = (
(-2 - 3*om2 + om1*(-3 + om1*om1 - 25*om1*om2))/(om1*om1*(om1 - 25*om2))
) * uu[-1] + (
(2 + 3*om1 + 3*om2)/(om1*om1*(om1 - 25*om2))
) * uu[-2] + h * (
(
(3*om1*((1 + om1)*(1 + om1)) - 2*(11 + 33*om1 + 36*(om1*om1))*om2 - 3*(11 + 25*om1)*(om2*om2))/(3.*om1*(om1 - 25*om2)*(om1 + om2))
) * ff[-1] + (
(28 + 42*om1 + 42*om2)/(3*(om1*om1) - 75*om1*om2)
) * ff[-2] + (
-(25 + 39*om1)/(3.*(om1 - 25*om2)*(om1 + om2))
) * ff[-3]
)
return u_new
def adaptive_estimate_EDC22(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
om2 = old_omega[-2]
om1 = old_omega[-1]
u1int = (
(-9 + 5*np.sqrt(3) + 9*om1*(-2 + np.sqrt(3) + 2*om1*(om1 - 25*om2)) + 9*(-2 + np.sqrt(3))*om2)/(18.*(om1*om1)*(om1 - 25*om2))
) * uu[-1] + (
-(-9 + 5*np.sqrt(3) + 9*(-2 + np.sqrt(3))*om1 + 9*(-2 + np.sqrt(3))*om2)/(18.*(om1*om1)*(om1 - 25*om2))
) * uu[-2] + h * (
(
(-18*(-3 + np.sqrt(3))*(om1*om1*om1) + 36*(om1*om1)*(2 - np.sqrt(3) + 12*(-3 + np.sqrt(3))*om2) + 22*om2*(-9 + 5*np.sqrt(3) + 9*(-2 + np.sqrt(3))*om2) + 3*om1*(9 - 5*np.sqrt(3) + 6*om2*(22*(-2 + np.sqrt(3)) + 25*(-3 + np.sqrt(3))*om2)))/(108.*om1*(om1 - 25*om2)*(om1 + om2))
) * ff[-1] + (
(-7*(-9 + 5*np.sqrt(3) + 9*(-2 + np.sqrt(3))*om1 + 9*(-2 + np.sqrt(3))*om2))/(27.*om1*(om1 - 25*om2))
) * ff[-2] + (
(25*(-9 + 5*np.sqrt(3)) + 234*(-2 + np.sqrt(3))*om1)/(108.*(om1 - 25*om2)*(om1 + om2))
) * ff[-3]
)
u2int = (
-(9 + 5*np.sqrt(3) + 9*(2 + np.sqrt(3))*om2 + 9*om1*(2 + np.sqrt(3) - 2*(om1*om1) + 50*om1*om2))/(18.*(om1*om1)*(om1 - 25*om2))
) * uu[-1] + (
(9 + 5*np.sqrt(3) + 9*(2 + np.sqrt(3))*om1 + 9*(2 + np.sqrt(3))*om2)/(18.*(om1*om1)*(om1 - 25*om2))
) * uu[-2] + h * (
(
-(-18*(3 + np.sqrt(3))*(om1*om1*om1) + 22*om2*(9 + 5*np.sqrt(3) + 9*(2 + np.sqrt(3))*om2) + 36*(om1*om1)*(-2 - np.sqrt(3) + 12*(3 + np.sqrt(3))*om2) + 3*om1*(-9 - 5*np.sqrt(3) + 6*om2*(22*(2 + np.sqrt(3)) + 25*(3 + np.sqrt(3))*om2)))/(108.*om1*(om1 - 25*om2)*(om1 + om2))
) * ff[-1] + (
(7*(9 + 5*np.sqrt(3) + 9*(2 + np.sqrt(3))*om1 + 9*(2 + np.sqrt(3))*om2))/(27.*om1*(om1 - 25*om2))
) * ff[-2] + (
-(25*(9 + 5*np.sqrt(3)) + 234*(2 + np.sqrt(3))*om1)/(108.*(om1 - 25*om2)*(om1 + om2))
) * ff[-3]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def conservative_EDC22(f, t_final, t0, u0, t1, u1, t2, u2,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1, t2], [u0, u1, u2],
fixed_step_EDC22, adaptive_step_EDC22,
**kwargs)
def cons_or_diss_EDC22(f, t_final, t0, u0, t1, u1, t2, u2,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1, t2], [u0, u1, u2],
fixed_step_EDC22, adaptive_step_EDC22,
fixed_estimate_EDC22, adaptive_estimate_EDC22,
**kwargs)
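# EDC22 uses ff[-3] even in its fixed step size form, so three history entries
# are needed for the same reduction check (a demo sketch):
def _check_EDC22_reduction():
    _check_reduces_to_fixed(fixed_step_EDC22, adaptive_step_EDC22, 3)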
def fixed_step_EDC23(uu, ff, h):
u_new = (
1.3333333333333333
) * uu[-1] + (
-0.3333333333333333
) * uu[-2] + h * (
(
2.1666666666666665
) * ff[-1] + (
-2.7222222222222223
) * ff[-2] + (
1.6111111111111112
) * ff[-3] + (
-0.3888888888888889
) * ff[-4]
)
return u_new
def fixed_estimate_EDC23(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
u1int = (
1.010183197601602
) * uu[-1] + (
-0.010183197601602053
) * uu[-2] + h * (
(
0.25167174254055585
) * ff[-1] + (
-0.08316278041308342
) * ff[-2] + (
0.04215603478531588
) * ff[-3] + (
-0.009523329109203206
) * ff[-4]
)
u2int = (
1.1884985349784358
) * uu[-1] + (
-0.1884985349784356
) * uu[-2] + h * (
(
1.4678040892234117
) * ff[-1] + (
-1.5394047023238908
) * ff[-2] + (
0.8811804372799698
) * ff[-3] + (
-0.2094032245631132
) * ff[-4]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def adaptive_step_EDC23(uu, ff, h, old_omega):
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
u_new = (
(-3 + om1*om1*om1*om1 - 8*om2 - 4*om3 - 6*om2*(om2 + om3) + 2*(om1*om1*om1)*(2*om2 + om3) - 2*om1*(4 + 6*om2 + 3*om3) - 2*(om1*om1)*(3 + 46*om2*(om2 + om3)))/(om1*om1*(om1*om1 - 92*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * uu[-1] + (
(3 + 6*(om1*om1) + 8*om2 + 4*om3 + 6*om2*(om2 + om3) + 2*om1*(4 + 6*om2 + 3*om3))/(om1*om1*(om1*om1 - 92*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * uu[-2] + h * (
(
(6*(om1*om1*om1*om1*om1) + 18*(om1*om1*om1*om1)*(1 + 2*om2 + om3) - 6*(om1*om1)*(-1 + 3*om2*(-4 + 39*om2 + 60*(om2*om2)) - 6*om3 + 9*om2*(13 + 30*om2)*om3 + 2*(-2 + 45*om2)*(om3*om3)) + 3*om1*(-2*om2*(-3 + 4*om2*(1 + om2)*(20 + 23*om2)) + 3*om3 - 4*om2*(40 + om2*(129 + 92*om2))*om3 - 4*(-1 + om2*(43 + 46*om2))*(om3*om3)) - 43*om2*(om2 + om3)*(3 + 4*om3 + 2*om2*(4 + 3*om2 + 3*om3)) + 3*(om1*om1*om1)*(6 - 166*(om2*om2) + om2*(30 - 166*om3) + om3*(15 + 4*om3)))/(6.*om1*(om1 + om2)*(om1 + om2 + om3)*(om1*om1 - 92*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-1] + (
(49*(3 + 6*(om1*om1) + 8*om2 + 4*om3 + 6*om2*(om2 + om3) + 2*om1*(4 + 6*om2 + 3*om3)))/(6.*om1*(om1*om1 - 92*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-2] + (
-(-3*(om1*om1*om1) + 46*(om2 + om3)*(3 + 4*om2 + 4*om3) + 3*(om1*om1)*(-2 + 93*om2 + 93*om3) + om1*(-3 + 282*(om2*om2) + 368*om3 + 282*(om3*om3) + 4*om2*(92 + 141*om3)))/(6.*(om1 + om2)*om3*(om1*om1 - 92*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-3] + (
(-3*om1*((1 + om1)*(1 + om1)) + (138 + om1*(368 + 279*om1))*om2 + 2*(92 + 141*om1)*(om2*om2))/(6.*om3*(om1 + om2 + om3)*(om1*om1 - 92*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-4]
)
return u_new
def adaptive_estimate_EDC23(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
u1int = (
(3*(-7 + 4*np.sqrt(3)) + 8*(-9 + 5*np.sqrt(3))*om2 + 4*(-9 + 5*np.sqrt(3))*om3 + 4*(9*(om1*om1*om1*om1) + 9*(-2 + np.sqrt(3))*om2*(om2 + om3) + 18*(om1*om1*om1)*(2*om2 + om3) + om1*(2*(-9 + 5*np.sqrt(3)) + 18*(-2 + np.sqrt(3))*om2 + 9*(-2 + np.sqrt(3))*om3) + 9*(om1*om1)*(-2 + np.sqrt(3) - 92*om2*(om2 + om3))))/(36.*(om1*om1)*(om1*om1 - 92*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * uu[-1] + (
(21 - 12*np.sqrt(3) + 8*(9 - 5*np.sqrt(3))*om2 + 4*(9 - 5*np.sqrt(3))*om3 + 4*(-9*(-2 + np.sqrt(3))*(om1*om1) - 9*(-2 + np.sqrt(3))*om2*(om2 + om3) + om1*(18 - 10*np.sqrt(3) - 18*(-2 + np.sqrt(3))*om2 - 9*(-2 + np.sqrt(3))*om3)))/(36.*(om1*om1)*(om1*om1 - 92*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * uu[-2] + h * (
(
(-36*(-3 + np.sqrt(3))*(om1*om1*om1*om1*om1) - 108*(om1*om1*om1*om1)*(-2 + np.sqrt(3) + 2*(-3 + np.sqrt(3))*om2 + (-3 + np.sqrt(3))*om3) + 18*(om1*om1*om1)*(9 - 5*np.sqrt(3) - 30*(-2 + np.sqrt(3))*om2 + 166*(-3 + np.sqrt(3))*(om2*om2) - 15*(-2 + np.sqrt(3))*om3 + 166*(-3 + np.sqrt(3))*om2*om3 - 4*(-3 + np.sqrt(3))*(om3*om3)) + 43*om2*(om2 + om3)*(3*(-7 + 4*np.sqrt(3)) + 4*(-9 + 5*np.sqrt(3))*om3 + 4*om2*(2*(-9 + 5*np.sqrt(3)) + 9*(-2 + np.sqrt(3))*om2 + 9*(-2 + np.sqrt(3))*om3)) + 6*(om1*om1)*(7 - 4*np.sqrt(3) + 1080*(-3 + np.sqrt(3))*(om2*om2*om2) + 54*(om2*om2)*(13*(-2 + np.sqrt(3)) + 30*(-3 + np.sqrt(3))*om3) - 6*om3*(-9 + 5*np.sqrt(3) + 4*(-2 + np.sqrt(3))*om3) + 6*om2*(18 - 10*np.sqrt(3) + 117*(-2 + np.sqrt(3))*om3 + 90*(-3 + np.sqrt(3))*(om3*om3))) + 3*om1*(1104*(-3 + np.sqrt(3))*(om2*om2*om2*om2) + om3*(21 - 12*np.sqrt(3) + 4*(9 - 5*np.sqrt(3))*om3) + 48*(om2*om2*om2)*(43*(-2 + np.sqrt(3)) + 46*(-3 + np.sqrt(3))*om3) + 8*(om2*om2)*(20*(-9 + 5*np.sqrt(3)) + 387*(-2 + np.sqrt(3))*om3 + 138*(-3 + np.sqrt(3))*(om3*om3)) + 2*om2*(21 - 12*np.sqrt(3) + 80*(-9 + 5*np.sqrt(3))*om3 + 516*(-2 + np.sqrt(3))*(om3*om3))))/(216.*om1*(om1 + om2)*(om1 + om2 + om3)*(om1*om1 - 92*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-1] + (
(-49*(3*(-7 + 4*np.sqrt(3)) + 8*(-9 + 5*np.sqrt(3))*om2 + 4*(-9 + 5*np.sqrt(3))*om3 + 4*(9*(-2 + np.sqrt(3))*(om1*om1) + 9*(-2 + np.sqrt(3))*om2*(om2 + om3) + om1*(2*(-9 + 5*np.sqrt(3)) + 18*(-2 + np.sqrt(3))*om2 + 9*(-2 + np.sqrt(3))*om3))))/(216.*om1*(om1*om1 - 92*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-2] + (
(-18*(-2 + np.sqrt(3))*(om1*om1*om1) + 138*(-7 + 4*np.sqrt(3))*(om2 + om3) + 184*(-9 + 5*np.sqrt(3))*((om2 + om3)*(om2 + om3)) + 6*(om1*om1)*(9 - 5*np.sqrt(3) + 279*(-2 + np.sqrt(3))*om2 + 279*(-2 + np.sqrt(3))*om3) + om1*(21 - 12*np.sqrt(3) + 1692*(-2 + np.sqrt(3))*(om2*om2) + 8*om2*(46*(-9 + 5*np.sqrt(3)) + 423*(-2 + np.sqrt(3))*om3) + 4*om3*(92*(-9 + 5*np.sqrt(3)) + 423*(-2 + np.sqrt(3))*om3)))/(216.*(om1 + om2)*om3*(om1*om1 - 92*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-3] + (
(18*(-2 + np.sqrt(3))*(om1*om1*om1) - 6*(om1*om1)*(9 - 5*np.sqrt(3) + 279*(-2 + np.sqrt(3))*om2) - 46*om2*(3*(-7 + 4*np.sqrt(3)) + 4*(-9 + 5*np.sqrt(3))*om2) + om1*(3*(-7 + 4*np.sqrt(3)) - 4*om2*(92*(-9 + 5*np.sqrt(3)) + 423*(-2 + np.sqrt(3))*om2)))/(216.*om3*(om1 + om2 + om3)*(om1*om1 - 92*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-4]
)
u2int = (
-(3*(7 + 4*np.sqrt(3)) + 8*(9 + 5*np.sqrt(3))*om2 + 4*(9 + 5*np.sqrt(3))*om3 + 4*(-9*(om1*om1*om1*om1) + 9*(2 + np.sqrt(3))*om2*(om2 + om3) - 18*(om1*om1*om1)*(2*om2 + om3) + om1*(2*(9 + 5*np.sqrt(3)) + 18*(2 + np.sqrt(3))*om2 + 9*(2 + np.sqrt(3))*om3) + 9*(om1*om1)*(2 + np.sqrt(3) + 92*om2*(om2 + om3))))/(36.*(om1*om1)*(om1*om1 - 92*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * uu[-1] + (
(3*(7 + 4*np.sqrt(3)) + 8*(9 + 5*np.sqrt(3))*om2 + 4*(9 + 5*np.sqrt(3))*om3 + 4*(9*(2 + np.sqrt(3))*(om1*om1) + 9*(2 + np.sqrt(3))*om2*(om2 + om3) + om1*(2*(9 + 5*np.sqrt(3)) + 18*(2 + np.sqrt(3))*om2 + 9*(2 + np.sqrt(3))*om3)))/(36.*(om1*om1)*(om1*om1 - 92*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * uu[-2] + h * (
(
(36*(3 + np.sqrt(3))*(om1*om1*om1*om1*om1) + 108*(om1*om1*om1*om1)*(2 + np.sqrt(3) + 2*(3 + np.sqrt(3))*om2 + (3 + np.sqrt(3))*om3) - 18*(om1*om1*om1)*(-9 - 5*np.sqrt(3) - 30*(2 + np.sqrt(3))*om2 + 166*(3 + np.sqrt(3))*(om2*om2) - 15*(2 + np.sqrt(3))*om3 + 166*(3 + np.sqrt(3))*om2*om3 - 4*(3 + np.sqrt(3))*(om3*om3)) - 43*om2*(om2 + om3)*(3*(7 + 4*np.sqrt(3)) + 4*(9 + 5*np.sqrt(3))*om3 + 4*om2*(2*(9 + 5*np.sqrt(3)) + 9*(2 + np.sqrt(3))*om2 + 9*(2 + np.sqrt(3))*om3)) - 6*(om1*om1)*(-7 - 4*np.sqrt(3) + 1080*(3 + np.sqrt(3))*(om2*om2*om2) - 6*om3*(9 + 5*np.sqrt(3) + 4*(2 + np.sqrt(3))*om3) + 54*(om2*om2)*(13*(2 + np.sqrt(3)) + 30*(3 + np.sqrt(3))*om3) + 6*om2*(-2*(9 + 5*np.sqrt(3)) + 117*(2 + np.sqrt(3))*om3 + 90*(3 + np.sqrt(3))*(om3*om3))) + 3*om1*(-1104*(3 + np.sqrt(3))*(om2*om2*om2*om2) - 48*(om2*om2*om2)*(43*(2 + np.sqrt(3)) + 46*(3 + np.sqrt(3))*om3) + om3*(3*(7 + 4*np.sqrt(3)) + 4*(9 + 5*np.sqrt(3))*om3) - 2*om2*(-3*(7 + 4*np.sqrt(3)) + 80*(9 + 5*np.sqrt(3))*om3 + 516*(2 + np.sqrt(3))*(om3*om3)) - 8*(om2*om2)*(20*(9 + 5*np.sqrt(3)) + 387*(2 + np.sqrt(3))*om3 + 138*(3 + np.sqrt(3))*(om3*om3))))/(216.*om1*(om1 + om2)*(om1 + om2 + om3)*(om1*om1 - 92*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-1] + (
(49*(3*(7 + 4*np.sqrt(3)) + 8*(9 + 5*np.sqrt(3))*om2 + 4*(9 + 5*np.sqrt(3))*om3 + 4*(9*(2 + np.sqrt(3))*(om1*om1) + 9*(2 + np.sqrt(3))*om2*(om2 + om3) + om1*(2*(9 + 5*np.sqrt(3)) + 18*(2 + np.sqrt(3))*om2 + 9*(2 + np.sqrt(3))*om3))))/(216.*om1*(om1*om1 - 92*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-2] + (
-(-18*(2 + np.sqrt(3))*(om1*om1*om1) + 138*(7 + 4*np.sqrt(3))*(om2 + om3) + 184*(9 + 5*np.sqrt(3))*((om2 + om3)*(om2 + om3)) + 6*(om1*om1)*(-9 - 5*np.sqrt(3) + 279*(2 + np.sqrt(3))*om2 + 279*(2 + np.sqrt(3))*om3) + om1*(-3*(7 + 4*np.sqrt(3)) + 1692*(2 + np.sqrt(3))*(om2*om2) + 8*om2*(46*(9 + 5*np.sqrt(3)) + 423*(2 + np.sqrt(3))*om3) + 4*om3*(92*(9 + 5*np.sqrt(3)) + 423*(2 + np.sqrt(3))*om3)))/(216.*(om1 + om2)*om3*(om1*om1 - 92*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-3] + (
(-18*(2 + np.sqrt(3))*(om1*om1*om1) + 6*(om1*om1)*(-9 - 5*np.sqrt(3) + 279*(2 + np.sqrt(3))*om2) + 46*om2*(3*(7 + 4*np.sqrt(3)) + 4*(9 + 5*np.sqrt(3))*om2) + om1*(-3*(7 + 4*np.sqrt(3)) + 4*om2*(92*(9 + 5*np.sqrt(3)) + 423*(2 + np.sqrt(3))*om2)))/(216.*om3*(om1 + om2 + om3)*(om1*om1 - 92*om2*(om2 + om3) + 2*om1*(2*om2 + om3)))
) * ff[-4]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def conservative_EDC23(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_EDC23, adaptive_step_EDC23,
**kwargs)
def cons_or_diss_EDC23(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_EDC23, adaptive_step_EDC23,
fixed_estimate_EDC23, adaptive_estimate_EDC23,
**kwargs)
def fixed_step_EDC33(uu, ff, h):
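# Fixed-step EDC33 update: the uu coefficients are exactly the BDF3 history weights
# 18/11, -9/11, 2/11, here combined with four explicit derivative evaluations instead of
# the implicit BDF3 stage.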
u_new = (
1.6363636363636365
) * uu[-1] + (
-0.8181818181818182
) * uu[-2] + (
0.18181818181818182
) * uu[-3] + h * (
(
2.0454545454545454
) * ff[-1] + (
-2.8636363636363638
) * ff[-2] + (
1.7727272727272727
) * ff[-3] + (
-0.4090909090909091
) * ff[-4]
)
return u_new
def fixed_estimate_EDC33(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h):
u1int = (
1.0205478614436652
) * uu[-1] + (
-0.025360387027637658
) * uu[-2] + (
0.004812525583972387
) * uu[-3] + h * (
(
0.2475844717004499
) * ff[-1] + (
-0.08876135459673179
) * ff[-2] + (
0.04692212444373077
) * ff[-3] + (
-0.010155712001954682
) * ff[-4]
)
u2int = (
1.3645475369850666
) * uu[-1] + (
-0.46422434922376526
) * uu[-2] + (
0.09967681223869877
) * uu[-3] + h * (
(
1.3976325129609792
) * ff[-1] + (
-1.6247852222831785
) * ff[-2] + (
0.971848919327313
) * ff[-3] + (
-0.2208918001566686
) * ff[-4]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def adaptive_step_EDC33(uu, ff, h, old_omega):
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
u_new = (
(-2*om2*((1 + om1 + om2)*(1 + om1 + om2))*(-7*(om1*om1*om1*om1) - om1*om1*om1*(-14 + om2) + 3*om1*om2 + om2*om2 + 3*(om1*om1)*(7 + 2*(-2 + om2)*om2)) + (14*(-3 + om1)*(om1*om1)*((1 + om1)*(1 + om1)*(1 + om1)) + 21*om1*(5 + om1*(8 + 2*om1 + om1*om1*om1))*om2 - 3*((1 + om1)*(1 + om1))*(-37 + 59*(-2 + om1)*om1)*(om2*om2) + 2*(148 + 331*om1 + 580*(om1*om1*om1))*(om2*om2*om2) + 224*(1 + 6*(om1*om1))*(om2*om2*om2*om2))*om3 + 2*(14*(-2 + om1)*(om1*om1)*((1 + om1)*(1 + om1)) - 35*(-2 + om1)*om1*((1 + om1)*(1 + om1))*om2 + 37*(2 + 6*om1 + 17*(om1*om1*om1))*(om2*om2) + 113*(1 + 6*(om1*om1))*(om2*om2*om2))*(om3*om3))/(om1*om1*(om1 + om2)*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * uu[-1] + (
(2*((om1 + om2)*(om1 + om2))*((1 + om1 + om2)*(1 + om1 + om2)) + (2*om1*(1 + om1)*(3 + 5*om1) - (111 + 4*om1*(70 + 51*om1))*om2 - 2*(148 + 219*om1)*(om2*om2) - 224*(om2*om2*om2))*om3 + 2*(4*om1*(1 + om1) - (74 + 109*om1)*om2 - 113*(om2*om2))*(om3*om3))/(om1*om1*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * uu[-2] + (
(-2*(om1*om1*om1) + 12*(om2 + om3)*(3 + 4*om2 + 4*om3) + om1*om1*(-4 + 74*om2 + 74*om3) + 2*om1*(-1 + 38*(om2*om2) + 48*om3 + 38*(om3*om3) + om2*(48 + 76*om3)))/((om1 + om2)*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * uu[-3] + h * (
(
(-2*om2*((om1 + om2)*(om1 + om2))*((1 + om1 + om2)*(1 + om1 + om2))*(-14*om1*(1 + om1) + (5 + 12*om1)*om2) + (28*(om1*om1*om1)*((1 + om1)*(1 + om1)*(1 + om1)) + 14*(om1*om1)*((1 + om1)*(1 + om1))*(1 + 7*om1)*om2 - 42*om1*((1 + om1)*(1 + om1))*(5 + 6*om1)*(om2*om2) + (545 + 2*om1*(880 + om1*(1215 + 989*om1)))*(om2*om2*om2) + 292*(5 + om1*(15 + 17*om1))*(om2*om2*om2*om2) + 222*(5 + 12*om1)*(om2*om2*om2*om2*om2))*om3 + 3*(14*(om1*om1)*((1 + om1)*(1 + om1))*(1 + 2*om1) - 7*om1*((1 + om1)*(1 + om1))*(5 + 2*om1)*om2 + (185 + 2*om1*(300 + om1*(415 + 337*om1)))*(om2*om2) + 148*(5 + om1*(15 + 17*om1))*(om2*om2*om2) + 150*(5 + 12*om1)*(om2*om2*om2*om2))*(om3*om3) + 2*(28*(om1*om1)*((1 + om1)*(1 + om1)) - 70*om1*((1 + om1)*(1 + om1))*om2 + 74*(5 + om1*(15 + 17*om1))*(om2*om2) + 113*(5 + 12*om1)*(om2*om2*om2))*(om3*om3*om3))/(2.*om1*(om1 + om2)*(om1 + om2 + om3)*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * ff[-1] + (
(7*(2*((om1 + om2)*(om1 + om2))*((1 + om1 + om2)*(1 + om1 + om2)) + (2*om1*(1 + om1)*(3 + 5*om1) - (111 + 4*om1*(70 + 51*om1))*om2 - 2*(148 + 219*om1)*(om2*om2) - 224*(om2*om2*om2))*om3 + 2*(4*om1*(1 + om1) - (74 + 109*om1)*om2 - 113*(om2*om2))*(om3*om3)))/(2.*om1*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * ff[-2] + (
(39*om2*(-(om1*om1*om1) + 6*(om2 + om3)*(3 + 4*om2 + 4*om3) + om1*om1*(-2 + 37*om2 + 37*om3) + om1*(-1 + 38*(om2*om2) + 48*om3 + 38*(om3*om3) + om2*(48 + 76*om3))))/(2.*(om1 + om2)*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * ff[-3] + (
-(14*((om1 + om1*om1)*(om1 + om1*om1)) - 49*om1*((1 + om1)*(1 + om1))*om2 + (678 + om1*(1800 + 1381*om1))*(om2*om2) + 76*(12 + 19*om1)*(om2*om2*om2))/(2.*(om1 + om2 + om3)*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * ff[-4]
)
return u_new
def adaptive_estimate_EDC33(eta, deta, f, uu, ff, old_eta, old_deta_f, idx_u_old, h, old_omega):
om3 = old_omega[-3]
om2 = old_omega[-2]
om1 = old_omega[-1]
u1int = (
(12*(-2 + np.sqrt(3) - 36*(om1*om1))*(om2*om2*om2*om2*om2) + 2*(om2*om2*om2)*(-7 + 4*np.sqrt(3) + 4*om1*(2*(-9 + 5*np.sqrt(3)) + 15*(-2 + np.sqrt(3))*om1 + 27*(om1*om1*om1)) + 148*(9 - 5*np.sqrt(3))*om3 + 6*om1*(-331*(-2 + np.sqrt(3)) + 3480*(om1*om1))*om3 - 678*(-2 + np.sqrt(3) - 36*(om1*om1))*(om3*om3)) + om2*om2*(6*om1*(-7 + 4*np.sqrt(3) + 12*om1*(-9 + 5*np.sqrt(3) + 8*(-2 + np.sqrt(3))*om1 + 15*(om1*om1*om1))) + 111*(7 - 4*np.sqrt(3))*om3 - 36*om1*(16*(-9 + 5*np.sqrt(3)) + 107*(-2 + np.sqrt(3))*om1 + 177*(om1*om1*om1))*om3 - 148*(-9 + 5*np.sqrt(3) + 18*om1*(-2 + np.sqrt(3) - 17*(om1*om1)))*(om3*om3)) + 7*om1*om2*(2*om1*(3*(-7 + 4*np.sqrt(3)) + 4*om1*(2*(-9 + 5*np.sqrt(3)) + 9*om1*(-2 + np.sqrt(3) + om1*om1))) + 15*(7 - 4*np.sqrt(3))*om3 + 12*om1*(18 - 10*np.sqrt(3) - 3*(-2 + np.sqrt(3))*om1 + 9*(om1*om1*om1))*om3 - 20*(-9 + 5*np.sqrt(3) + 9*om1*(-2 + np.sqrt(3) + 2*(om1*om1)))*(om3*om3)) + 4*(om2*om2*om2*om2)*(-9 + 5*np.sqrt(3) - 336*(-2 + np.sqrt(3))*om3 + 3*om1*(5*(-2 + np.sqrt(3)) - 66*(om1*om1) + 4032*om1*om3)) + 14*(om1*om1)*om3*(3*(-7 + 4*np.sqrt(3)) + 4*(-9 + 5*np.sqrt(3))*om3 + 4*om1*(2*(-9 + 5*np.sqrt(3)) + 9*(-2 + np.sqrt(3))*om3 + 9*om1*(-2 + np.sqrt(3) + om1*om1 + 2*om1*om3))))/(36.*(om1*om1)*(om1 + om2)*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * uu[-1] + (
(-12*(-2 + np.sqrt(3))*(om1*om1*om1*om1) + 4*(om1*om1*om1)*(9 - 5*np.sqrt(3) - 12*(-2 + np.sqrt(3))*om2 - 15*(-2 + np.sqrt(3))*om3) - 2*(om1*om1)*(-7 + 4*np.sqrt(3) + 36*(-2 + np.sqrt(3))*(om2*om2) + 6*om2*(-9 + 5*np.sqrt(3) - 102*(-2 + np.sqrt(3))*om3) + 8*om3*(-9 + 5*np.sqrt(3) + 3*(-2 + np.sqrt(3))*om3)) + 2*om1*(-24*(-2 + np.sqrt(3))*(om2*om2*om2) + om3*(21 - 12*np.sqrt(3) + 4*(9 - 5*np.sqrt(3))*om3) + 6*(om2*om2)*(9 - 5*np.sqrt(3) + 219*(-2 + np.sqrt(3))*om3) + 2*om2*(7 - 4*np.sqrt(3) + 70*(-9 + 5*np.sqrt(3))*om3 + 327*(-2 + np.sqrt(3))*(om3*om3))) + om2*(-12*(-2 + np.sqrt(3))*(om2*om2*om2) + 37*om3*(-21 + 12*np.sqrt(3) - 36*om3 + 20*np.sqrt(3)*om3) + 4*(om2*om2)*(9 - 5*np.sqrt(3) + 336*(-2 + np.sqrt(3))*om3) + 2*om2*(7 - 4*np.sqrt(3) + 148*(-9 + 5*np.sqrt(3))*om3 + 678*(-2 + np.sqrt(3))*(om3*om3))))/(36.*(om1*om1)*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * uu[-2] + (
(6*(-2 + np.sqrt(3))*(om1*om1*om1) - 18*(-7 + 4*np.sqrt(3))*(om2 + om3) - 24*(-9 + 5*np.sqrt(3))*((om2 + om3)*(om2 + om3)) + 2*(om1*om1)*(-9 + 5*np.sqrt(3) - 111*(-2 + np.sqrt(3))*om2 - 111*(-2 + np.sqrt(3))*om3) + om1*(-7 + 4*np.sqrt(3) - 228*(-2 + np.sqrt(3))*(om2*om2) + 12*om3*(36 - 20*np.sqrt(3) - 19*(-2 + np.sqrt(3))*om3) + 24*om2*(18 - 10*np.sqrt(3) - 19*(-2 + np.sqrt(3))*om3)))/(18.*(om1 + om2)*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * uu[-3] + h * (
(
(-168*(-3 + np.sqrt(3))*(om1*om1*om1*om1*om1*om1)*(om2 + om3) + 12*(om1*om1*om1*om1*om1)*(-44*(-3 + np.sqrt(3))*(om2*om2) - 42*om3*(-2 + np.sqrt(3) + (-3 + np.sqrt(3))*om3) - 7*om2*(6*(-2 + np.sqrt(3)) + 7*(-3 + np.sqrt(3))*om3)) + 5*(om2*om2)*(om2 + om3)*(12*(-2 + np.sqrt(3))*(om2*om2*om2) - 4*(om2*om2)*(9 - 5*np.sqrt(3) + 336*(-2 + np.sqrt(3))*om3) - 37*om3*(3*(-7 + 4*np.sqrt(3)) + 4*(-9 + 5*np.sqrt(3))*om3) - 2*om2*(7 - 4*np.sqrt(3) + 148*(-9 + 5*np.sqrt(3))*om3 + 678*(-2 + np.sqrt(3))*(om3*om3))) + 12*(om1*om1*om1*om1)*(-36*(-3 + np.sqrt(3))*(om2*om2*om2) + 3*(om2*om2)*(-37*(-2 + np.sqrt(3)) + 42*(-3 + np.sqrt(3))*om3) - 7*om3*(-9 + 5*np.sqrt(3) + 15*(-2 + np.sqrt(3))*om3 + 4*(-3 + np.sqrt(3))*(om3*om3)) + 7*om2*(9 - 5*np.sqrt(3) + 3*om3*(-5*(-2 + np.sqrt(3)) + (-3 + np.sqrt(3))*om3))) + 2*(om1*om1*om1)*(96*(-3 + np.sqrt(3))*(om2*om2*om2*om2) + 6*(om2*om2*om2)*(-76*(-2 + np.sqrt(3)) - 989*(-3 + np.sqrt(3))*om3) - 18*(om2*om2)*(5*(-9 + 5*np.sqrt(3)) - 119*(-2 + np.sqrt(3))*om3 + 337*(-3 + np.sqrt(3))*(om3*om3)) + 7*om2*(14 - 8*np.sqrt(3) + 9*(9 - 5*np.sqrt(3))*om3 + 81*(-2 + np.sqrt(3))*(om3*om3) + 60*(-3 + np.sqrt(3))*(om3*om3*om3)) + 14*om3*(7 - 4*np.sqrt(3) - 6*om3*(-9 + 5*np.sqrt(3) + 4*(-2 + np.sqrt(3))*om3))) + 2*(om1*om1)*(204*(-3 + np.sqrt(3))*(om2*om2*om2*om2*om2) + 7*(om3*om3)*(21 - 12*np.sqrt(3) + 4*(9 - 5*np.sqrt(3))*om3) - 12*(om2*om2*om2*om2)*(-9*(-2 + np.sqrt(3)) + 1241*(-3 + np.sqrt(3))*om3) + om2*om2*om2*(44*(9 - 5*np.sqrt(3)) - 7290*(-2 + np.sqrt(3))*om3 - 22644*(-3 + np.sqrt(3))*(om3*om3)) + om2*om2*(161 - 92*np.sqrt(3) + 336*(-9 + 5*np.sqrt(3))*om3 - 7470*(-2 + np.sqrt(3))*(om3*om3) - 7548*(-3 + np.sqrt(3))*(om3*om3*om3)) + 7*om2*om3*(7 - 4*np.sqrt(3) + 6*om3*(3*(-9 + 5*np.sqrt(3)) + 20*(-2 + np.sqrt(3))*om3))) + om1*om2*(144*(-3 + np.sqrt(3))*(om2*om2*om2*om2*om2) - 72*(om2*om2*om2*om2)*(-5*(-2 + np.sqrt(3)) + 222*(-3 + np.sqrt(3))*om3) + 35*(om3*om3)*(3*(-7 + 4*np.sqrt(3)) + 4*(-9 + 5*np.sqrt(3))*om3) - 30*om2*om3*(49 - 28*np.sqrt(3) + 60*(-9 + 5*np.sqrt(3))*om3 + 444*(-2 + np.sqrt(3))*(om3*om3)) - 4*(om2*om2*om2)*(63 - 35*np.sqrt(3) + 90*om3*(73*(-2 + np.sqrt(3)) + 90*(-3 + np.sqrt(3))*om3)) - 8*(om2*om2)*(-7 + 4*np.sqrt(3) + om3*(220*(-9 + 5*np.sqrt(3)) + 9*om3*(555*(-2 + np.sqrt(3)) + 226*(-3 + np.sqrt(3))*om3)))))/(72.*om1*(om1 + om2)*(om1 + om2 + om3)*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * ff[-1] + (
(7*(-12*(-2 + np.sqrt(3))*(om1*om1*om1*om1) + 4*(om1*om1*om1)*(9 - 5*np.sqrt(3) - 12*(-2 + np.sqrt(3))*om2 - 15*(-2 + np.sqrt(3))*om3) - 2*(om1*om1)*(-7 + 4*np.sqrt(3) + 36*(-2 + np.sqrt(3))*(om2*om2) + 6*om2*(-9 + 5*np.sqrt(3) - 102*(-2 + np.sqrt(3))*om3) + 8*om3*(-9 + 5*np.sqrt(3) + 3*(-2 + np.sqrt(3))*om3)) + 2*om1*(-24*(-2 + np.sqrt(3))*(om2*om2*om2) + om3*(21 - 12*np.sqrt(3) + 4*(9 - 5*np.sqrt(3))*om3) + 6*(om2*om2)*(9 - 5*np.sqrt(3) + 219*(-2 + np.sqrt(3))*om3) + 2*om2*(7 - 4*np.sqrt(3) + 70*(-9 + 5*np.sqrt(3))*om3 + 327*(-2 + np.sqrt(3))*(om3*om3))) + om2*(-12*(-2 + np.sqrt(3))*(om2*om2*om2) + 4*(om2*om2)*(9 - 5*np.sqrt(3) + 336*(-2 + np.sqrt(3))*om3) + 37*om3*(3*(-7 + 4*np.sqrt(3)) + 4*(-9 + 5*np.sqrt(3))*om3) + 2*om2*(7 - 4*np.sqrt(3) + 148*(-9 + 5*np.sqrt(3))*om3 + 678*(-2 + np.sqrt(3))*(om3*om3)))))/(72.*om1*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * ff[-2] + (
(-13*om2*(-6*(-2 + np.sqrt(3))*(om1*om1*om1) + 18*(-7 + 4*np.sqrt(3))*(om2 + om3) + 24*(-9 + 5*np.sqrt(3))*((om2 + om3)*(om2 + om3)) + 2*(om1*om1)*(9 - 5*np.sqrt(3) + 111*(-2 + np.sqrt(3))*om2 + 111*(-2 + np.sqrt(3))*om3) + om1*(7 - 4*np.sqrt(3) + 48*(-9 + 5*np.sqrt(3))*om2 + 228*(-2 + np.sqrt(3))*(om2*om2) + 48*(-9 + 5*np.sqrt(3))*om3 + 456*(-2 + np.sqrt(3))*om2*om3 + 228*(-2 + np.sqrt(3))*(om3*om3))))/(24.*(om1 + om2)*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * ff[-3] + (
(84*(-2 + np.sqrt(3))*(om1*om1*om1*om1) + 678*(-7 + 4*np.sqrt(3))*(om2*om2) + 912*(-9 + 5*np.sqrt(3))*(om2*om2*om2) + 14*(om1*om1*om1)*(2*(-9 + 5*np.sqrt(3)) - 21*(-2 + np.sqrt(3))*om2) + 2*(om1*om1)*(7*(-7 + 4*np.sqrt(3)) + 49*(9 - 5*np.sqrt(3))*om2 + 4143*(-2 + np.sqrt(3))*(om2*om2)) + om1*om2*(49*(7 - 4*np.sqrt(3)) + 24*om2*(75*(-9 + 5*np.sqrt(3)) + 361*(-2 + np.sqrt(3))*om2)))/(72.*(om1 + om2 + om3)*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * ff[-4]
)
u2int = (
(-12*(2 + np.sqrt(3) + 36*(om1*om1))*(om2*om2*om2*om2*om2) - 4*(om2*om2*om2*om2)*(9 + 5*np.sqrt(3) + 30*om1 + 15*np.sqrt(3)*om1 + 198*(om1*om1*om1) - 336*(2 + np.sqrt(3) + 36*(om1*om1))*om3) + 2*(om2*om2*om2)*(-7 - 4*np.sqrt(3) + 4*om1*(-2*(9 + 5*np.sqrt(3)) - 15*(2 + np.sqrt(3))*om1 + 27*(om1*om1*om1)) + 148*(9 + 5*np.sqrt(3))*om3 + 6*om1*(331*(2 + np.sqrt(3)) + 3480*(om1*om1))*om3 + 678*(2 + np.sqrt(3) + 36*(om1*om1))*(om3*om3)) + om2*om2*(6*om1*(-7 - 4*np.sqrt(3) + 12*om1*(-9 - 5*np.sqrt(3) - 8*(2 + np.sqrt(3))*om1 + 15*(om1*om1*om1))) + 111*(7 + 4*np.sqrt(3))*om3 + 36*om1*(16*(9 + 5*np.sqrt(3)) + 107*(2 + np.sqrt(3))*om1 - 177*(om1*om1*om1))*om3 + 148*(9 + 5*np.sqrt(3) + 18*om1*(2 + np.sqrt(3) + 17*(om1*om1)))*(om3*om3)) + 7*om1*om2*(72*(om1*om1*om1*om1*om1) + 108*(om1*om1*om1*om1)*om3 + 4*(om1*om1)*(-4*(9 + 5*np.sqrt(3)) + 9*(2 + np.sqrt(3))*om3) + 5*om3*(3*(7 + 4*np.sqrt(3)) + 4*(9 + 5*np.sqrt(3))*om3) - 72*(om1*om1*om1)*(2 + np.sqrt(3) + 5*(om3*om3)) + 6*om1*(-7 - 4*np.sqrt(3) + 4*(9 + 5*np.sqrt(3))*om3 + 30*(2 + np.sqrt(3))*(om3*om3))) + 14*(om1*om1)*om3*(-3*(7 + 4*np.sqrt(3)) - 4*(9 + 5*np.sqrt(3))*om3 + 4*om1*(-2*(9 + 5*np.sqrt(3)) - 9*(2 + np.sqrt(3))*om3 + 9*om1*(-2 - np.sqrt(3) + om1*om1 + 2*om1*om3))))/(36.*(om1*om1)*(om1 + om2)*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * uu[-1] + (
(12*(2 + np.sqrt(3))*(om1*om1*om1*om1) + 4*(om1*om1*om1)*(9 + 5*np.sqrt(3) + 12*(2 + np.sqrt(3))*om2 + 15*(2 + np.sqrt(3))*om3) + 2*(om1*om1)*(7 + 4*np.sqrt(3) + 36*(2 + np.sqrt(3))*(om2*om2) + 6*om2*(9 + 5*np.sqrt(3) - 102*(2 + np.sqrt(3))*om3) + 8*om3*(9 + 5*np.sqrt(3) + 3*(2 + np.sqrt(3))*om3)) + 2*om1*(24*(2 + np.sqrt(3))*(om2*om2*om2) + om3*(21 + 12*np.sqrt(3) + 36*om3 + 20*np.sqrt(3)*om3) - 6*(om2*om2)*(-9 - 5*np.sqrt(3) + 219*(2 + np.sqrt(3))*om3) - 2*om2*(-7 - 4*np.sqrt(3) + 70*(9 + 5*np.sqrt(3))*om3 + 327*(2 + np.sqrt(3))*(om3*om3))) + om2*(12*(2 + np.sqrt(3))*(om2*om2*om2) - 37*om3*(21 + 12*np.sqrt(3) + 36*om3 + 20*np.sqrt(3)*om3) - 4*(om2*om2)*(-9 - 5*np.sqrt(3) + 336*(2 + np.sqrt(3))*om3) - 2*om2*(-7 - 4*np.sqrt(3) + 148*(9 + 5*np.sqrt(3))*om3 + 678*(2 + np.sqrt(3))*(om3*om3))))/(36.*(om1*om1)*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * uu[-2] + (
(-6*(2 + np.sqrt(3))*(om1*om1*om1) + 18*(7 + 4*np.sqrt(3))*(om2 + om3) + 24*(9 + 5*np.sqrt(3))*((om2 + om3)*(om2 + om3)) + 2*(om1*om1)*(-9 - 5*np.sqrt(3) + 111*(2 + np.sqrt(3))*om2 + 111*(2 + np.sqrt(3))*om3) + om1*(-7 - 4*np.sqrt(3) + 48*(9 + 5*np.sqrt(3))*om2 + 228*(2 + np.sqrt(3))*(om2*om2) + 48*(9 + 5*np.sqrt(3))*om3 + 456*(2 + np.sqrt(3))*om2*om3 + 228*(2 + np.sqrt(3))*(om3*om3)))/(18.*(om1 + om2)*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * uu[-3] + h * (
(
(168*(3 + np.sqrt(3))*(om1*om1*om1*om1*om1*om1)*(om2 + om3) + 12*(om1*om1*om1*om1*om1)*(44*(3 + np.sqrt(3))*(om2*om2) + 42*om3*(2 + np.sqrt(3) + (3 + np.sqrt(3))*om3) + 7*om2*(6*(2 + np.sqrt(3)) + 7*(3 + np.sqrt(3))*om3)) - 5*(om2*om2)*(om2 + om3)*(12*(2 + np.sqrt(3))*(om2*om2*om2) - 4*(om2*om2)*(-9 - 5*np.sqrt(3) + 336*(2 + np.sqrt(3))*om3) - 37*om3*(3*(7 + 4*np.sqrt(3)) + 4*(9 + 5*np.sqrt(3))*om3) - 2*om2*(-7 - 4*np.sqrt(3) + 148*(9 + 5*np.sqrt(3))*om3 + 678*(2 + np.sqrt(3))*(om3*om3))) + 2*(om1*om1*om1)*(-96*(3 + np.sqrt(3))*(om2*om2*om2*om2) + 6*(om2*om2*om2)*(76*(2 + np.sqrt(3)) + 989*(3 + np.sqrt(3))*om3) + 18*(om2*om2)*(5*(9 + 5*np.sqrt(3)) - 119*(2 + np.sqrt(3))*om3 + 337*(3 + np.sqrt(3))*(om3*om3)) - 7*om2*(-2*(7 + 4*np.sqrt(3)) - 9*(9 + 5*np.sqrt(3))*om3 + 81*(2 + np.sqrt(3))*(om3*om3) + 60*(3 + np.sqrt(3))*(om3*om3*om3)) + 14*om3*(7 + 4*np.sqrt(3) + 6*om3*(9 + 5*np.sqrt(3) + 4*(2 + np.sqrt(3))*om3))) + 12*(om1*om1*om1*om1)*(36*(3 + np.sqrt(3))*(om2*om2*om2) + 3*(om2*om2)*(37*(2 + np.sqrt(3)) - 42*(3 + np.sqrt(3))*om3) + 7*om3*(9 + 5*np.sqrt(3) + 15*(2 + np.sqrt(3))*om3 + 4*(3 + np.sqrt(3))*(om3*om3)) - 7*om2*(-9 - 5*np.sqrt(3) + 3*om3*(-5*(2 + np.sqrt(3)) + (3 + np.sqrt(3))*om3))) + 2*(om1*om1)*(-204*(3 + np.sqrt(3))*(om2*om2*om2*om2*om2) + 12*(om2*om2*om2*om2)*(-9*(2 + np.sqrt(3)) + 1241*(3 + np.sqrt(3))*om3) + 7*(om3*om3)*(3*(7 + 4*np.sqrt(3)) + 4*(9 + 5*np.sqrt(3))*om3) + om2*om2*(23*(7 + 4*np.sqrt(3)) - 336*(9 + 5*np.sqrt(3))*om3 + 7470*(2 + np.sqrt(3))*(om3*om3) + 7548*(3 + np.sqrt(3))*(om3*om3*om3)) + 7*om2*om3*(7 + 4*np.sqrt(3) - 6*om3*(3*(9 + 5*np.sqrt(3)) + 20*(2 + np.sqrt(3))*om3)) + 2*(om2*om2*om2)*(22*(9 + 5*np.sqrt(3)) + 9*om3*(405*(2 + np.sqrt(3)) + 1258*(3 + np.sqrt(3))*om3))) + om1*om2*(-144*(3 + np.sqrt(3))*(om2*om2*om2*om2*om2) + 72*(om2*om2*om2*om2)*(-5*(2 + np.sqrt(3)) + 222*(3 + np.sqrt(3))*om3) - 35*(om3*om3)*(3*(7 + 4*np.sqrt(3)) + 4*(9 + 5*np.sqrt(3))*om3) + 30*om2*om3*(-7*(7 + 4*np.sqrt(3)) + 60*(9 + 5*np.sqrt(3))*om3 + 444*(2 + np.sqrt(3))*(om3*om3)) + 4*(om2*om2*om2)*(-7*(9 + 5*np.sqrt(3)) + 90*om3*(73*(2 + np.sqrt(3)) + 90*(3 + np.sqrt(3))*om3)) + 8*(om2*om2)*(7 + 4*np.sqrt(3) + om3*(220*(9 + 5*np.sqrt(3)) + 9*om3*(555*(2 + np.sqrt(3)) + 226*(3 + np.sqrt(3))*om3)))))/(72.*om1*(om1 + om2)*(om1 + om2 + om3)*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * ff[-1] + (
(7*(12*(2 + np.sqrt(3))*(om1*om1*om1*om1) + 4*(om1*om1*om1)*(9 + 5*np.sqrt(3) + 12*(2 + np.sqrt(3))*om2 + 15*(2 + np.sqrt(3))*om3) + 2*(om1*om1)*(7 + 4*np.sqrt(3) + 36*(2 + np.sqrt(3))*(om2*om2) + 6*om2*(9 + 5*np.sqrt(3) - 102*(2 + np.sqrt(3))*om3) + 8*om3*(9 + 5*np.sqrt(3) + 3*(2 + np.sqrt(3))*om3)) + 2*om1*(24*(2 + np.sqrt(3))*(om2*om2*om2) - 6*(om2*om2)*(-9 - 5*np.sqrt(3) + 219*(2 + np.sqrt(3))*om3) + om3*(3*(7 + 4*np.sqrt(3)) + 4*(9 + 5*np.sqrt(3))*om3) - 2*om2*(-7 - 4*np.sqrt(3) + 70*(9 + 5*np.sqrt(3))*om3 + 327*(2 + np.sqrt(3))*(om3*om3))) + om2*(12*(2 + np.sqrt(3))*(om2*om2*om2) - 4*(om2*om2)*(-9 - 5*np.sqrt(3) + 336*(2 + np.sqrt(3))*om3) - 37*om3*(3*(7 + 4*np.sqrt(3)) + 4*(9 + 5*np.sqrt(3))*om3) - 2*om2*(-7 - 4*np.sqrt(3) + 148*(9 + 5*np.sqrt(3))*om3 + 678*(2 + np.sqrt(3))*(om3*om3)))))/(72.*om1*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * ff[-2] + (
(-13*om2*(6*(2 + np.sqrt(3))*(om1*om1*om1) - 18*(7 + 4*np.sqrt(3))*(om2 + om3) - 24*(9 + 5*np.sqrt(3))*((om2 + om3)*(om2 + om3)) - 2*(om1*om1)*(-9 - 5*np.sqrt(3) + 111*(2 + np.sqrt(3))*om2 + 111*(2 + np.sqrt(3))*om3) - om1*(-7 - 4*np.sqrt(3) + 48*(9 + 5*np.sqrt(3))*om2 + 228*(2 + np.sqrt(3))*(om2*om2) + 48*(9 + 5*np.sqrt(3))*om3 + 456*(2 + np.sqrt(3))*om2*om3 + 228*(2 + np.sqrt(3))*(om3*om3))))/(24.*(om1 + om2)*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * ff[-3] + (
(-84*(2 + np.sqrt(3))*(om1*om1*om1*om1) + 14*(om1*om1*om1)*(-2*(9 + 5*np.sqrt(3)) + 21*(2 + np.sqrt(3))*om2) - 6*(om2*om2)*(113*(7 + 4*np.sqrt(3)) + 152*(9 + 5*np.sqrt(3))*om2) - 2*(om1*om1)*(7*(7 + 4*np.sqrt(3)) - 49*(9 + 5*np.sqrt(3))*om2 + 4143*(2 + np.sqrt(3))*(om2*om2)) + om1*om2*(49*(7 + 4*np.sqrt(3)) + 24*om2*(-75*(9 + 5*np.sqrt(3)) - 361*(2 + np.sqrt(3))*om2)))/(72.*(om1 + om2 + om3)*(14*(om1*om1*om1)*(om2 + om3) - 12*(om2*om2)*(om2 - 113*om3)*(om2 + om3) + om1*om1*(16*(om2*om2) + 7*om2*om3 + 28*(om3*om3)) - 2*om1*om2*(5*(om2*om2) + 92*om2*om3 + 49*(om3*om3))))
) * ff[-4]
)
eta_est = old_eta[-1] + h * ( 0.5 * np.dot(deta(u1int), f(u1int)) + 0.5 * np.dot(deta(u2int), f(u2int)) )
return eta_est
def conservative_EDC33(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
**kwargs):
return conservative_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_EDC33, adaptive_step_EDC33,
**kwargs)
def cons_or_diss_EDC33(f, t_final, t0, u0, t1, u1, t2, u2, t3, u3,
**kwargs):
return cons_or_diss_LMM(f, t_final, [t0, t1, t2, t3], [u0, u1, u2, u3],
fixed_step_EDC33, adaptive_step_EDC33,
fixed_estimate_EDC33, adaptive_estimate_EDC33,
**kwargs)
def relaxation_ERK(rkm, dt, f, eta, deta, w0, num_steps,
relaxed=True, method="brentq", tol=1.e-14, maxiter=10000, jac=False, newdt=True,
debug=False, print_gamma=False):
"""Relaxed explicit Runge-Kutta method for general functionals."""
rkm = rkm.__num__()
w = np.array(w0) # current value of the unknown function
t = 0 # current time
ww = np.zeros([np.size(w0), 1]) # values at each time step
ww[:,0] = w.copy()
tt = np.zeros(1) # time points for ww
gg = np.ones(1) # values of gamma
tt[0] = t
b = rkm.b
s = len(rkm)
y = np.zeros((s, np.size(w0))) # stage values
F = np.zeros((s, np.size(w0))) # stage derivatives
max_gammam1 = 0. # max(gamma-1) over all timesteps
old_gamma = 1.0
step = 0
while step < num_steps:
step = step + 1
for i in range(s):
y[i,:] = w.copy()
for j in range(i):
y[i,:] += rkm.A[i,j]*dt*F[j,:]
F[i,:] = f(y[i,:])
if relaxed:
direction = dt * sum([b[i]*F[i,:] for i in range(s)])
estimate = dt * sum([b[i]*np.dot(deta(y[i,:]),F[i,:]) for i in range(s)])
r = lambda gamma: eta(w+gamma*direction) - eta(w) - gamma*estimate
if debug:
print('r(1): ', r(1))
rjac= lambda gamma: np.array([np.dot(deta(w+gamma*direction), direction) - estimate])
# pass the analytic Jacobian to the root solver only when requested via the jac argument
if jac == False:
use_jac = False
else:
use_jac = rjac
if method == "newton":
gam = newton(r, old_gamma, fprime=rjac, tol=tol, maxiter=maxiter)
success = True
msg = "Newton method did not converge"
elif method == "brentq" or method == "bisect":
left = 0.9 * old_gamma
right = 1.1 * old_gamma
left_right_iter = 0
while r(left) * r(right) > 0:
left *= 0.9
right *= 1.1
left_right_iter += 1
if left_right_iter > 100:
raise SolveForGammaException(
"No suitable bounds found after %d iterations.\nLeft = %e; r(left) = %e\nRight = %e; r(right) = %e\n"%(
left_right_iter, left, r(left), right, r(right)),
w)
if method == "brentq":
gam = brentq(r, left, right, xtol=tol, maxiter=maxiter)
else:
gam = bisect(r, left, right, xtol=tol, maxiter=maxiter)
success = True
msg = "%s method did not converge"%method
else:
sol = root(r, old_gamma, jac=use_jac, method=method, tol=tol,
options={'xtol': tol, 'maxiter': maxiter})
gam = sol.x
success = sol.success
msg = sol.message
if not success:
print('Warning: the root solve for the relaxation parameter gamma did not converge.')
print(gam)
print(msg)
if gam <= 0:
print('Warning: gamma is negative.')
else:
gam = 1.
old_gamma = gam
if debug:
gm1 = np.abs(1.-gam)
max_gammam1 = max(max_gammam1,gm1)
if gm1 > 0.5:
print(gam)
raise Exception("The time step is probably too large.")
w = w + dt*gam*sum([b[i]*F[i] for i in range(s)])
if newdt:
t += gam*dt
else:
t += dt
tt = np.append(tt, t)
ww = np.append(ww, np.reshape(w.copy(), (len(w), 1)), axis=1)
gg = np.append(gg, gam)
if debug:
if print_gamma:
print(max_gammam1)
return tt, ww, gg
else:
return tt, ww
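# Minimal usage sketch (assumptions: NodePy supplies the method object; the oscillator,
# eta, and deta below are illustrative and not part of the original file):
#
#   from nodepy import rk
#   f = lambda u: np.array([-u[1], u[0]])   # harmonic oscillator, conserves 0.5*||u||^2
#   eta = lambda u: 0.5 * np.dot(u, u)      # conserved energy functional
#   deta = lambda u: u                      # gradient of eta
#   tt, ww = relaxation_ERK(rk.loadRKM('RK44'), 0.1, f, eta, deta,
#                           np.array([1.0, 0.0]), num_steps=100)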
def relaxation_DIRK(rkm, dt, f, eta, deta, w0, num_steps,
relaxed=True, method="brentq", tol=1.e-14, maxiter=10000, jac=False, newdt=True,
debug=False, print_gamma=False):
"""Relaxed diagonally implicit Runge-Kutta method for general functionals."""
rkm = rkm.__num__()
w = np.array(w0) # current value of the unknown function
t = 0 # current time
ww = np.zeros([np.size(w0), 1]) # values at each time step
ww[:,0] = w.copy()
tt = np.zeros(1) # time points for ww
gg = np.ones(1) # values of gamma
tt[0] = t
b = rkm.b
s = len(rkm)
y = np.zeros((s, np.size(w0))) # stage values
F = np.zeros((s, np.size(w0))) # stage derivatives
max_gammam1 = 0. # max(gamma-1) over all timesteps
old_gamma = 1.0
step = 0
while step < num_steps:
step = step + 1
for i in range(s):
stageeq = lambda Y: (Y - w - dt*sum([rkm.A[i,j]*F[j,:] for j in range(i)]) \
- dt*rkm.A[i,i]*f(Y)).squeeze()
nexty, info, ier, mesg = fsolve(stageeq,w,full_output=1)
if ier != 1:
print(mesg)
# print(info)
# raise Exception("System couldn't be solved.")
y[i,:] = nexty.copy()
F[i,:] = f(y[i,:])
if relaxed:
direction = dt * sum([b[i]*F[i,:] for i in range(s)])
estimate = dt * sum([b[i]*np.dot(deta(y[i,:]),F[i,:]) for i in range(s)])
r = lambda gamma: eta(w+gamma*direction) - eta(w) - gamma*estimate
if debug:
print('r(1): ', r(1))
rjac= lambda gamma: np.array([np.dot(deta(w+gamma*direction), direction) - estimate])
# pass the analytic Jacobian to the root solver only when requested via the jac argument
if jac == False:
use_jac = False
else:
use_jac = rjac
if method == "newton":
gam = newton(r, old_gamma, fprime=rjac, tol=tol, maxiter=maxiter)
success = True
msg = "Newton method did not converge"
elif method == "brentq" or method == "bisect":
left = 0.9 * old_gamma
right = 1.1 * old_gamma
left_right_iter = 0
while r(left) * r(right) > 0:
left *= 0.9
right *= 1.1
left_right_iter += 1
if left_right_iter > 100:
raise SolveForGammaException(
"No suitable bounds found after %d iterations.\nLeft = %e; r(left) = %e\nRight = %e; r(right) = %e\n"%(
left_right_iter, left, r(left), right, r(right)),
w)
if method == "brentq":
gam = brentq(r, left, right, xtol=tol, maxiter=maxiter)
else:
gam = bisect(r, left, right, xtol=tol, maxiter=maxiter)
success = True
msg = "%s method did not converge"%method
else:
sol = root(r, old_gamma, jac=use_jac, method=method, tol=tol,
options={'xtol': tol, 'maxiter': maxiter})
gam = sol.x
success = sol.success
msg = sol.message
if not success:
print('Warning: the root solve for the relaxation parameter gamma did not converge.')
print(gam)
print(msg)
if gam <= 0:
print('Warning: gamma is negative.')
else:
gam = 1.
old_gamma = gam
if debug:
gm1 = np.abs(1.-gam)
max_gammam1 = max(max_gammam1,gm1)
if gm1 > 0.5:
print(gam)
raise Exception("The time step is probably too large.")
w = w + dt*gam*sum([b[i]*F[i] for i in range(s)])
if newdt:
t += gam*dt
else:
t += dt
tt = np.append(tt, t)
ww = np.append(ww, np.reshape(w.copy(), (len(w), 1)), axis=1)
gg = np.append(gg, gam)
if debug:
if print_gamma:
print(max_gammam1)
return tt, ww, gg
else:
return tt, ww
def conservative_BDF2(f, t_final, t0, u0, t1, u1,
idx_u_old=-1,
eta=etaL2, deta=detaL2,
return_gamma=False,
projection=False, relaxation=False,
adapt_dt=False, adapt_coefficients=False,
method=None, tol=1.e-14, maxiter=10000, maxsteps=10**12):
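# BDF2 with optional conservative relaxation or projection. Each step solves the implicit
# equation u_new = uval + h*fcoeff*f(u_new) with fsolve; with relaxation enabled, u_new is
# then moved along the line through u_old by the factor gamma returned from
# conservative_relaxation_solve (defined elsewhere in this module) to preserve eta.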
uu = [u0, u1]
ff = [f(u) for u in uu]
tt = [t0, t1]
h = tt[1] - tt[0]
old_omega = [(tt[i+1] - tt[i]) / h for i in np.arange(len(tt)-1)]
old_gamma = [1.0 for i in np.arange(len(tt)-1)]
if relaxation and projection:
raise Exception("Use either relaxation or projection, not both.")
if relaxation and method is None:
method = "brentq"
elif projection and method is None:
method = "simplified Newton"
t = tt[-1]
gammas = [1.0 for t in tt]
step = 0
while t < t_final and step < maxsteps:
step += 1
if relaxation and adapt_coefficients:
om1 = old_omega[-1]
uval = (
((1 + om1)*(1 + om1))/(om1*(2 + om1))
) * uu[-1] + (
-(1/(2*om1 + om1*om1))
) * uu[-2]
fcoeff = (1 + om1)/(2 + om1)
stageeq = lambda Y: (Y - uval - h * fcoeff * f(Y)).squeeze()
nexty, info, ier, mesg = fsolve(stageeq, uu[-1], full_output=1)
if ier != 1:
print(mesg)
# print(info)
# raise Exception("System couldn't be solved.")
u_new = nexty.copy()
else:
uval = (
1.3333333333333333
) * uu[-1] + (
-0.3333333333333333
) * uu[-2]
fcoeff = 0.6666666666666666
stageeq = lambda Y: (Y - uval - h * fcoeff * f(Y)).squeeze()
nexty, info, ier, mesg = fsolve(stageeq, uu[-1], full_output=1)
if ier != 1:
print(mesg)
# print(info)
# raise Exception("System couldn't be solved.")
u_new = nexty.copy()
u_old = uu[idx_u_old]
eta_old = eta(u_old)
if projection:
gamma, u_new = conservative_projection_solve(eta, deta, u_old, eta_old, u_new, method, tol, maxiter)
elif relaxation:
gamma = conservative_relaxation_solve(eta, deta, u_old, eta_old, u_new, old_gamma[-1], method, tol, maxiter)
u_new = u_old + gamma * (u_new - u_old)
for i in np.arange(-len(old_gamma), -1):
old_gamma[i] = old_gamma[i+1]
old_gamma[-1] = gamma
else:
gamma = 1.0
if return_gamma:
gammas.append(gamma)
uu.append(u_new)
if relaxation and adapt_dt:
t = tt[idx_u_old] - gamma * idx_u_old * h
new_omega = -idx_u_old*gamma - np.sum([old_omega[i] for i in np.arange(-1, idx_u_old, -1)])
for i in np.arange(-len(old_omega), -1):
old_omega[i] = old_omega[i+1]
old_omega[-1] = new_omega
if gamma < 1.0e-14:
raise Exception("gamma = %.2e is too small in step %d!" % (gamma, step))
else:
t += h
tt.append(t)
for i in np.arange(-len(ff), -1):
ff[i] = ff[i+1]
ff[-1] = f(u_new)
if return_gamma:
return np.array(tt), uu, np.array(gammas)
else:
return np.array(tt), uu
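# Minimal usage sketch (illustrative data; in practice the second starting value u1 would
# come from a starting method of sufficient order, not plain explicit Euler):
#
#   f = lambda u: np.array([-u[1], u[0]])
#   u0 = np.array([1.0, 0.0]); h = 0.01
#   u1 = u0 + h * f(u0)
#   tt, uu = conservative_BDF2(f, 1.0, 0.0, u0, h, u1, relaxation=True)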
def conservative_BDF3(f, t_final, t0, u0, t1, u1, t2, u2,
idx_u_old=-1,
eta=etaL2, deta=detaL2,
return_gamma=False,
projection=False, relaxation=False,
adapt_dt=False, adapt_coefficients=False,
method=None, tol=1.e-14, maxiter=10000, maxsteps=10**12):
uu = [u0, u1, u2]
ff = [f(u) for u in uu]
tt = [t0, t1, t2]
h = tt[1] - tt[0]
old_omega = [(tt[i+1] - tt[i]) / h for i in np.arange(len(tt)-1)]
old_gamma = [1.0 for i in np.arange(len(tt)-1)]
if relaxation and projection:
raise Exception("Use either relaxation or projection, not both.")
if relaxation and method is None:
method = "brentq"
elif projection and method is None:
method = "simplified Newton"
t = tt[-1]
gammas = [1.0 for t in tt]
step = 0
while t < t_final and step < maxsteps:
step += 1
if relaxation and adapt_coefficients:
om2 = old_omega[-2]
om1 = old_omega[-1]
uval = (
((1 + om1)*(1 + om1)*((1 + om1 + om2)*(1 + om1 + om2)))/(om1*(om1 + om2)*(3 + 2*om2 + om1*(4 + om1 + om2)))
) * uu[-1] + (
-(((1 + om1 + om2)*(1 + om1 + om2))/(om1*om2*(3 + 2*om2 + om1*(4 + om1 + om2))))
) * uu[-2] + (
((1 + om1)*(1 + om1))/(om2*(om1 + om2)*(3 + 2*om2 + om1*(4 + om1 + om2)))
) * uu[-3]
fcoeff = 1/(1 + 1/(1 + om1) + 1/(1 + om1 + om2))
stageeq = lambda Y: (Y - uval - h * fcoeff * f(Y)).squeeze()
nexty, info, ier, mesg = fsolve(stageeq, uu[-1], full_output=1)
if ier != 1:
print(mesg)
# print(info)
# raise Exception("System couldn't be solved.")
u_new = nexty.copy()
else:
uval = (
1.6363636363636365
) * uu[-1] + (
-0.8181818181818182
) * uu[-2] + (
0.18181818181818182
) * uu[-3]
fcoeff = 0.5454545454545454
stageeq = lambda Y: (Y - uval - h * fcoeff * f(Y)).squeeze()
nexty, info, ier, mesg = fsolve(stageeq, uu[-1], full_output=1)
if ier != 1:
print(mesg)
# print(info)
# raise Exception("System couldn't be solved.")
u_new = nexty.copy()
u_old = uu[idx_u_old]
eta_old = eta(u_old)
if projection:
gamma, u_new = conservative_projection_solve(eta, deta, u_old, eta_old, u_new, method, tol, maxiter)
elif relaxation:
gamma = conservative_relaxation_solve(eta, deta, u_old, eta_old, u_new, old_gamma[-1], method, tol, maxiter)
u_new = u_old + gamma * (u_new - u_old)
for i in np.arange(-len(old_gamma), -1):
old_gamma[i] = old_gamma[i+1]
old_gamma[-1] = gamma
else:
gamma = 1.0
if return_gamma:
gammas.append(gamma)
uu.append(u_new)
if relaxation and adapt_dt:
t = tt[idx_u_old] - gamma * idx_u_old * h
new_omega = -idx_u_old*gamma - np.sum([old_omega[i] for i in np.arange(-1, idx_u_old, -1)])
for i in np.arange(-len(old_omega), -1):
old_omega[i] = old_omega[i+1]
old_omega[-1] = new_omega
if gamma < 1.0e-14:
raise Exception("gamma = %.2e is too small in step %d!" % (gamma, step))
else:
t += h
tt.append(t)
for i in np.arange(-len(ff), -1):
ff[i] = ff[i+1]
ff[-1] = f(u_new)
if return_gamma:
return np.array(tt), uu, np.array(gammas)
else:
return np.array(tt), uu
[record separator: per-file quality-signal columns omitted]
hexsha: b7f6238937ccef5917c1831919c6671688c60eb3
size: 14027
ext: py
lang: Python
max_stars_repo_path: ECE_mechanism/plot_voltammogram_ECE.py
max_stars_repo_name: truejulosdu13/Electrochemistry
max_stars_repo_head_hexsha: 183914d75f7d8ec8fdaa03a1f5133f24afaf6f38
max_stars_repo_licenses: ["MIT"]
max_issues_repo_path: ECE_mechanism/plot_voltammogram_ECE.py
max_issues_repo_name: truejulosdu13/Electrochemistry
max_issues_repo_head_hexsha: 183914d75f7d8ec8fdaa03a1f5133f24afaf6f38
max_issues_repo_licenses: ["MIT"]
max_forks_repo_path: ECE_mechanism/plot_voltammogram_ECE.py
max_forks_repo_name: truejulosdu13/Electrochemistry
max_forks_repo_head_hexsha: 183914d75f7d8ec8fdaa03a1f5133f24afaf6f38
max_forks_repo_licenses: ["MIT"]
import sys
sys.path.append('../')
import math  # math.floor is used below; imported explicitly instead of relying on star imports
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
viridis = cm.get_cmap('viridis', 12)
from potential_applied import *
from ECE_mechanism.EDP_solver_ECE import *
# main program for linear sweep voltammetry
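# Each driver below integrates the four-species (A-D) ECE reaction-diffusion system on a
# 1D grid of Nx points per species, advancing Nt time steps with the matrices built by
# EDP_solver_ECE, and records the current I at every step before plotting the profiles.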
def main_LSV_ECE_red(cst_all):
F_norm = cst_all["F_norm"]
Nx = cst_all["Nx"]
Nt = cst_all["Nt"]
DM = cst_all["DM"]
Dx = cst_all["Dx"]
k_p, k_m = cst_all["k_p"], cst_all["k_m"]
(E, tk) = rampe(cst_all["E_i"], cst_all["E_ox"], cst_all["E_red"], cst_all["v"], cst_all["Ox"])
## time step
Dt = tk/cst_all["Nt"]
print("DM = ", cst_all["DM"], "and lambda = ", cst_all["Lambda"])
## initial concentration profile
C_new = np.append([cst_all["C_a"] for i in range(Nx)], [cst_all["C_b"] for i in range(Nx)])
C_new = np.append(C_new, [cst_all["C_c"] for i in range(Nx)])
C_new = np.append(C_new, [cst_all["C_d"] for i in range(Nx)])
## time propagation
fig, ax = plt.subplots(6, figsize=(10, 30))
(M_new_constant, M_old) = Matrix_constant_ECE(Nx, Dt, 4, k_p, k_m, DM)
I = np.array(())
for i in range(Nt):
C_old = C_new
t = i*Dt
M_new = Matrix_ECE_boundaries_red(M_new_constant, t, E,
cst_all["Lambda"],
Nx,
F_norm,
cst_all["E_0_1"],
cst_all["E_0_2"],
cst_all["alpha"],
cst_all["n"])
C_new = compute_Cnew_ECE(M_new, M_old, C_old,
cst_all["C_a"],
cst_all["C_b"],
cst_all["C_c"],
cst_all["C_d"],
Nx)
I = np.append(I, compute_I_ECE_red(C_new, cst_all))
if i % math.floor(Nt/10) == 0:
ax[0].plot([j*Dx for j in range(Nx)], C_new[:-3*Nx], label= 'time = %is' %(i*Dt), color = viridis(i/Nt))
ax[1].plot([j*Dx for j in range(Nx)], C_new[Nx:-2*Nx], label= 'time = %is' %(i*Dt), color = viridis(i/Nt))
ax[2].plot([j*Dx for j in range(Nx)], C_new[2*Nx:-Nx], label= 'time = %is' %(i*Dt), color = viridis(i/Nt))
ax[3].plot([j*Dx for j in range(Nx)], C_new[3*Nx:], label= 'time = %is' %(i*Dt), color = viridis(i/Nt))
ax[4].plot([E(i*(Dt)) for i in range(Nt)], I)
ax[5].plot([i*Dt for i in range(Nt)], [E(i*(Dt)) for i in range(Nt)])
ax[0].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
ax[1].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
ax[2].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
ax[3].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
ax[0].title.set_text('Concentration profile of A as a function of time')
ax[1].title.set_text('Concentration profile of B as a function of time')
ax[2].title.set_text('Concentration profile of C as a function of time')
ax[3].title.set_text('Concentration profile of D as a function of time')
titre_i_E = f"ECE current-potential curve with E1 = {cst_all['E_0_1']} V and E2 = {cst_all['E_0_2']} V."
ax[4].title.set_text(titre_i_E)
ax[5].title.set_text('E(t)')
plt.savefig('ECE.png')
plt.show()
return I
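# Minimal usage sketch (the keys below are exactly those read by this module; the values
# are placeholders to be filled in by the caller):
#
#   cst_all = {"F_norm": ..., "Nx": ..., "Nt": ..., "DM": ..., "Dx": ..., "Lambda": ...,
#              "k_p": ..., "k_m": ..., "E_i": ..., "E_ox": ..., "E_red": ..., "v": ...,
#              "Ox": ..., "C_a": ..., "C_b": ..., "C_c": ..., "C_d": ...,
#              "E_0_1": ..., "E_0_2": ..., "alpha": ..., "n": ...}
#   I = main_LSV_ECE_red(cst_all)
#
# The CSV drivers additionally read "Delta_E"; the SWV driver also reads "E_SW" and "f".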
# main program for cyclic staircase voltammetry
def main_CSV_ECE_red(cst_all):
F_norm = cst_all["F_norm"]
Nx = cst_all["Nx"]
Nt = cst_all["Nt"]
DM = cst_all["DM"]
Dx = cst_all["Dx"]
k_p, k_m = cst_all["k_p"], cst_all["k_m"]
(E, tk) = CSV(cst_all["E_i"], cst_all["E_ox"], cst_all["E_red"], cst_all["Delta_E"], cst_all["v"])
## time step
Dt = tk/cst_all["Nt"]
print("DM = ", cst_all["DM"], "and lambda = ", cst_all["Lambda"])
## initial concentration profile
C_new = np.append([cst_all["C_a"] for i in range(Nx)], [cst_all["C_b"] for i in range(Nx)])
C_new = np.append(C_new, [cst_all["C_c"] for i in range(Nx)])
C_new = np.append(C_new, [cst_all["C_d"] for i in range(Nx)])
## time propagation
fig, ax = plt.subplots(6, figsize=(10, 30))
(M_new_constant, M_old) = Matrix_constant_ECE(Nx, Dt, 4, k_p, k_m, DM)
I = np.array(())
for i in range(Nt):
C_old = C_new
t = i*Dt
M_new = Matrix_ECE_boundaries_red(M_new_constant, t, E,
cst_all["Lambda"],
Nx,
F_norm,
cst_all["E_0_1"],
cst_all["E_0_2"],
cst_all["alpha"],
cst_all["n"])
C_new = compute_Cnew_ECE(M_new, M_old, C_old,
cst_all["C_a"],
cst_all["C_b"],
cst_all["C_c"],
cst_all["C_d"],
Nx)
I = np.append(I, compute_I_ECE_red(C_new, cst_all))
if i % math.floor(Nt/10) == 0:
ax[0].plot([j*Dx for j in range(Nx)], C_new[:-3*Nx], label= 'time = %is' %(i*Dt), color = viridis(i/Nt))
ax[1].plot([j*Dx for j in range(Nx)], C_new[Nx:-2*Nx], label= 'time = %is' %(i*Dt), color = viridis(i/Nt))
ax[2].plot([j*Dx for j in range(Nx)], C_new[2*Nx:-Nx], label= 'time = %is' %(i*Dt), color = viridis(i/Nt))
ax[3].plot([j*Dx for j in range(Nx)], C_new[3*Nx:], label= 'time = %is' %(i*Dt), color = viridis(i/Nt))
ax[4].plot([E(i*(Dt)) for i in range(Nt)], I)
ax[5].plot([i*Dt for i in range(Nt)], [E(i*(Dt)) for i in range(Nt)])
ax[0].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
ax[1].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
ax[2].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
ax[3].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
ax[0].title.set_text('Concentration profile of A as a function of time')
ax[1].title.set_text('Concentration profile of B as a function of time')
ax[2].title.set_text('Concentration profile of C as a function of time')
ax[3].title.set_text('Concentration profile of D as a function of time')
titre_i_E = f"ECE current-potential curve with E1 = {cst_all['E_0_1']} V and E2 = {cst_all['E_0_2']} V."
ax[4].title.set_text(titre_i_E)
ax[5].title.set_text('E(t)')
plt.savefig('ECE.png')
plt.show()
return I
# main program for cyclic staircase voltammetry
def main_CSV_ECE_ox(cst_all):
F_norm = cst_all["F_norm"]
Nx = cst_all["Nx"]
Nt = cst_all["Nt"]
DM = cst_all["DM"]
Dx = cst_all["Dx"]
k_p, k_m = cst_all["k_p"], cst_all["k_m"]
(E, tk) = CSV(cst_all["E_i"], cst_all["E_ox"], cst_all["E_red"], cst_all["Delta_E"], cst_all["v"])
## time step
Dt = tk/cst_all["Nt"]
print("DM = ", cst_all["DM"], "and lambda = ", cst_all["Lambda"])
## initial concentration profile
C_new = np.append([cst_all["C_a"] for i in range(Nx)], [cst_all["C_b"] for i in range(Nx)])
C_new = np.append(C_new, [cst_all["C_c"] for i in range(Nx)])
C_new = np.append(C_new, [cst_all["C_d"] for i in range(Nx)])
## time propagation
fig, ax = plt.subplots(6, figsize=(10, 30))
(M_new_constant, M_old) = Matrix_constant_ECE(Nx, Dt, 4, k_p, k_m, DM)
I = np.array(())
for i in range(Nt):
C_old = C_new
t = i*Dt
M_new = Matrix_ECE_boundaries_ox(M_new_constant, t, E,
cst_all["Lambda"],
Nx,
F_norm,
cst_all["E_0_1"],
cst_all["E_0_2"],
cst_all["alpha"],
cst_all["n"])
C_new = compute_Cnew_ECE(M_new, M_old, C_old,
cst_all["C_a"],
cst_all["C_b"],
cst_all["C_c"],
cst_all["C_d"],
Nx)
I = np.append(I, compute_I_ECE_red(C_new, cst_all))
if i % math.floor(Nt/10) == 0:
ax[0].plot([j*Dx for j in range(Nx)], C_new[:-3*Nx], label= 'time = %is' %(i*Dt), color = viridis(i/Nt))
ax[1].plot([j*Dx for j in range(Nx)], C_new[Nx:-2*Nx], label= 'time = %is' %(i*Dt), color = viridis(i/Nt))
ax[2].plot([j*Dx for j in range(Nx)], C_new[2*Nx:-Nx], label= 'time = %is' %(i*Dt), color = viridis(i/Nt))
ax[3].plot([j*Dx for j in range(Nx)], C_new[3*Nx:], label= 'time = %is' %(i*Dt), color = viridis(i/Nt))
ax[4].plot([E(i*(Dt)) for i in range(Nt)], I)
ax[5].plot([i*Dt for i in range(Nt)], [E(i*(Dt)) for i in range(Nt)])
ax[0].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
ax[1].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
ax[2].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
ax[3].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
ax[0].title.set_text('Concentration profile of A as a function of time')
ax[1].title.set_text('Concentration profile of B as a function of time')
ax[2].title.set_text('Concentration profile of C as a function of time')
ax[3].title.set_text('Concentration profile of D as a function of time')
titre_i_E = f"ECE current-potential curve with E1 = {cst_all['E_0_1']} V and E2 = {cst_all['E_0_2']} V."
ax[4].title.set_text(titre_i_E)
ax[5].title.set_text('E(t)')
plt.savefig('ECE.png')
plt.show()
return I
# main program for square wave voltammetry
def main_SWV_ECE_red(cst_all):
F_norm = cst_all["F_norm"]
Nx = cst_all["Nx"]
Nt = cst_all["Nt"]
DM = cst_all["DM"]
Dx = cst_all["Dx"]
(E, E_sweep, tk) = SWV(cst_all["E_i"],
cst_all["E_ox"],
cst_all["E_red"],
cst_all["E_SW"],
cst_all["Delta_E"],
cst_all["f"],
cst_all["Ox"])
k_p, k_m = cst_all["k_p"], cst_all["k_m"]
## time step
Dt = tk/Nt
print("DM = ", DM, "and lambda = ", cst_all["Lambda"])
print("Dt = ", Dt, "and T = 2Pi/f = ", 2*np.pi/cst_all["f"])
# arbitrary criterion to check that the time step is small enough compared to the square-wave period
if 20*Dt > 2*np.pi/cst_all["f"]:
print("YOU SHOULD INCREASE THE NUMBER OF TIME STEPS TO GET MEANINGFUL RESULTS!")
## initial concentration profile
C_new = np.append([cst_all["C_a"] for i in range(Nx)], [cst_all["C_b"] for i in range(Nx)])
C_new = np.append(C_new, [cst_all["C_c"] for i in range(Nx)])
C_new = np.append(C_new, [cst_all["C_d"] for i in range(Nx)])
## time propagation
fig, ax = plt.subplots(6, figsize=(10, 30))
(M_new_constant, M_old) = Matrix_constant_ECE(Nx, Dt, 4, k_p, k_m, DM)
I = np.array(())
for i in range(Nt):
C_old = C_new
t = i*Dt
M_new = Matrix_ECE_boundaries_red(M_new_constant, t, E,
cst_all["Lambda"],
Nx,
F_norm,
cst_all["E_0_1"],
cst_all["E_0_2"],
cst_all["alpha"],
cst_all["n"])
C_new = compute_Cnew_ECE(M_new, M_old, C_old,
cst_all["C_a"],
cst_all["C_b"],
cst_all["C_c"],
cst_all["C_d"],
Nx)
I = np.append(I, compute_I_ECE_red(C_new, cst_all))
if i % math.floor(Nt/10) == 0:
ax[0].plot([j*Dx for j in range(Nx)], C_new[:-3*Nx], label= 'time = %is' %(i*Dt), color = viridis(i/Nt))
ax[1].plot([j*Dx for j in range(Nx)], C_new[Nx:-2*Nx], label= 'time = %is' %(i*Dt), color = viridis(i/Nt))
ax[2].plot([j*Dx for j in range(Nx)], C_new[2*Nx:-Nx], label= 'time = %is' %(i*Dt), color = viridis(i/Nt))
ax[3].plot([j*Dx for j in range(Nx)], C_new[3*Nx:], label= 'time = %is' %(i*Dt), color = viridis(i/Nt))
ax[4].plot([E(i*(Dt)) for i in range(Nt)], I)
ax[5].plot([i*Dt for i in range(Nt)], [E(i*(Dt)) for i in range(Nt)])
ax[0].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
ax[1].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
ax[2].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
ax[3].legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
ax[0].title.set_text('Concentration profile of A as a function of time')
ax[1].title.set_text('Concentration profile of B as a function of time')
ax[2].title.set_text('Concentration profile of C as a function of time')
ax[3].title.set_text('Concentration profile of D as a function of time')
titre_i_E = f"ECE SWV curve with E1 = {cst_all['E_0_1']} V and E2 = {cst_all['E_0_2']} V."
ax[4].title.set_text(titre_i_E)
ax[5].title.set_text('E(t)')
plt.savefig('ECE.png')
plt.show()
return I
[record separator: per-file quality-signal columns omitted]
hexsha: 4d149344e51a509f5b984464bdb09c9a2f695ddb
size: 40880
ext: py
lang: Python
max_stars_repo_path: python_msx_sdk/api/workflows_api.py
max_stars_repo_name: CiscoDevNet/python-msx-sdk
max_stars_repo_head_hexsha: d7e0a08c656504b4f4551d263e67c671a2a04b3f
max_stars_repo_licenses: ["MIT"]
max_issues_repo_path: python_msx_sdk/api/workflows_api.py
max_issues_repo_name: CiscoDevNet/python-msx-sdk
max_issues_repo_head_hexsha: d7e0a08c656504b4f4551d263e67c671a2a04b3f
max_issues_repo_licenses: ["MIT"]
max_forks_repo_path: python_msx_sdk/api/workflows_api.py
max_forks_repo_name: CiscoDevNet/python-msx-sdk
max_forks_repo_head_hexsha: d7e0a08c656504b4f4551d263e67c671a2a04b3f
max_forks_repo_licenses: ["MIT"]
"""
MSX SDK
MSX SDK client. # noqa: E501
The version of the OpenAPI document: 1.0.9
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from python_msx_sdk.api_client import ApiClient, Endpoint as _Endpoint
from python_msx_sdk.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from python_msx_sdk.model.error import Error
from python_msx_sdk.model.start_workflow_response import StartWorkflowResponse
from python_msx_sdk.model.validate_workflow_response import ValidateWorkflowResponse
from python_msx_sdk.model.workflow import Workflow
from python_msx_sdk.model.workflow_mapping import WorkflowMapping
from python_msx_sdk.model.workflow_start_config import WorkflowStartConfig
class WorkflowsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
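# Typical use, mirroring the generated docstrings below (assumes a configured ApiClient):
#   api = WorkflowsApi(ApiClient())
#   api.delete_workflow("workflow-id")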
def __delete_workflow(
self,
id,
**kwargs
):
"""Delete a workflow. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_workflow(id, async_req=True)
>>> result = thread.get()
Args:
id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.delete_workflow = _Endpoint(
settings={
'response_type': None,
'auth': [],
'endpoint_path': '/workflow/api/v8/workflows/{id}',
'operation_id': 'delete_workflow',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__delete_workflow
)
def __export_workflow(
self,
id,
**kwargs
):
"""Exports a workflow. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.export_workflow(id, async_req=True)
>>> result = thread.get()
Args:
id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
{str: (bool, date, datetime, dict, float, int, list, str, none_type)}
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.export_workflow = _Endpoint(
settings={
'response_type': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},),
'auth': [],
'endpoint_path': '/workflow/api/v8/workflows/{id}/export',
'operation_id': 'export_workflow',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__export_workflow
)
def __get_workflow(
self,
id,
**kwargs
):
"""Returns a workflow. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_workflow(id, async_req=True)
>>> result = thread.get()
Args:
id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Workflow
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.get_workflow = _Endpoint(
settings={
'response_type': (Workflow,),
'auth': [],
'endpoint_path': '/workflow/api/v8/workflows/{id}',
'operation_id': 'get_workflow',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_workflow
)
def __get_workflow_start_config(
self,
id,
**kwargs
):
"""Returns a workflow start config. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_workflow_start_config(id, async_req=True)
>>> result = thread.get()
Args:
id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
WorkflowStartConfig
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.get_workflow_start_config = _Endpoint(
settings={
'response_type': (WorkflowStartConfig,),
'auth': [],
'endpoint_path': '/workflow/api/v8/workflows/{id}/startconfig',
'operation_id': 'get_workflow_start_config',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_workflow_start_config
)
def __get_workflows_list(
self,
**kwargs
):
"""Returns a list of workflows. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_workflows_list(async_req=True)
>>> result = thread.get()
Keyword Args:
tenant_id (str): [optional]
atomic (bool): [optional] if omitted the server will use the default value of False
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[Workflow]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
return self.call_with_http_info(**kwargs)
self.get_workflows_list = _Endpoint(
settings={
'response_type': ([Workflow],),
'auth': [],
'endpoint_path': '/workflow/api/v8/workflows/list',
'operation_id': 'get_workflows_list',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'tenant_id',
'atomic',
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'tenant_id':
(str,),
'atomic':
(bool,),
},
'attribute_map': {
'tenant_id': 'tenantId',
'atomic': 'atomic',
},
'location_map': {
'tenant_id': 'query',
'atomic': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_workflows_list
)
def __import_workflow(
self,
request_body,
**kwargs
):
"""Imports a workflow. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.import_workflow(request_body, async_req=True)
>>> result = thread.get()
Args:
request_body ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}):
Keyword Args:
tenant_ids ([str]): [optional]
_global (bool): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
WorkflowMapping
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['request_body'] = \
request_body
return self.call_with_http_info(**kwargs)
self.import_workflow = _Endpoint(
settings={
'response_type': (WorkflowMapping,),
'auth': [],
'endpoint_path': '/workflow/api/v8/workflows',
'operation_id': 'import_workflow',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'request_body',
'tenant_ids',
'_global',
],
'required': [
'request_body',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'request_body':
({str: (bool, date, datetime, dict, float, int, list, str, none_type)},),
'tenant_ids':
([str],),
'_global':
(bool,),
},
'attribute_map': {
'tenant_ids': 'tenantIds',
'_global': 'global',
},
'location_map': {
'request_body': 'body',
'tenant_ids': 'query',
'_global': 'query',
},
'collection_format_map': {
'tenant_ids': 'multi',
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__import_workflow
)
def __start_workflow(
self,
id,
workflow_start_config,
**kwargs
):
"""Starts a workflow. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.start_workflow(id, workflow_start_config, async_req=True)
>>> result = thread.get()
Args:
id (str):
workflow_start_config (WorkflowStartConfig):
Keyword Args:
sync (bool): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[StartWorkflowResponse]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['workflow_start_config'] = \
workflow_start_config
return self.call_with_http_info(**kwargs)
self.start_workflow = _Endpoint(
settings={
'response_type': ([StartWorkflowResponse],),
'auth': [],
'endpoint_path': '/workflow/api/v8/workflows/{id}/start',
'operation_id': 'start_workflow',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'id',
'workflow_start_config',
'sync',
],
'required': [
'id',
'workflow_start_config',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'workflow_start_config':
(WorkflowStartConfig,),
'sync':
(bool,),
},
'attribute_map': {
'id': 'id',
'sync': 'sync',
},
'location_map': {
'id': 'path',
'workflow_start_config': 'body',
'sync': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__start_workflow
)
def __update_workflow(
self,
id,
request_body,
**kwargs
):
"""Updates a workflow. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_workflow(id, request_body, async_req=True)
>>> result = thread.get()
Args:
id (str):
request_body ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}):
Keyword Args:
tenant_ids ([str]): [optional]
_global (bool): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
WorkflowMapping
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['request_body'] = \
request_body
return self.call_with_http_info(**kwargs)
self.update_workflow = _Endpoint(
settings={
'response_type': (WorkflowMapping,),
'auth': [],
'endpoint_path': '/workflow/api/v8/workflows/{id}',
'operation_id': 'update_workflow',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'id',
'request_body',
'tenant_ids',
'_global',
],
'required': [
'id',
'request_body',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'request_body':
({str: (bool, date, datetime, dict, float, int, list, str, none_type)},),
'tenant_ids':
([str],),
'_global':
(bool,),
},
'attribute_map': {
'id': 'id',
'tenant_ids': 'tenantIds',
'_global': 'global',
},
'location_map': {
'id': 'path',
'request_body': 'body',
'tenant_ids': 'query',
'_global': 'query',
},
'collection_format_map': {
'tenant_ids': 'multi',
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__update_workflow
)
def __validate_workflow(
self,
id,
**kwargs
):
"""Validates a workflow. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.validate_workflow(id, async_req=True)
>>> result = thread.get()
Args:
id (str):
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
ValidateWorkflowResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.validate_workflow = _Endpoint(
settings={
'response_type': (ValidateWorkflowResponse,),
'auth': [],
'endpoint_path': '/workflow/api/v8/workflows/{id}/validate',
'operation_id': 'validate_workflow',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__validate_workflow
)
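# --- Usage sketch (not part of the generated file above) ----------------------
# A minimal, hedged sketch of driving the generated WorkflowsApi. The host URL
# and workflow id are placeholders, and the top-level Configuration import
# assumes the usual openapi-generator package layout; adjust to the actual SDK
# if it differs.
from python_msx_sdk import Configuration
from python_msx_sdk.api_client import ApiClient
from python_msx_sdk.api.workflows_api import WorkflowsApi

configuration = Configuration(host="https://msx.example.com")  # placeholder host
with ApiClient(configuration) as api_client:
    workflows = WorkflowsApi(api_client)
    # Synchronous by default; pass async_req=True to get a thread-like handle
    # and call .get() on it, as the docstrings above show.
    workflow = workflows.get_workflow("example-workflow-id")  # placeholder id
    print(workflow)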
| 35.796848
| 106
| 0.44797
| 3,438
| 40,880
| 5.075044
| 0.058173
| 0.032497
| 0.026823
| 0.027854
| 0.880273
| 0.86583
| 0.846917
| 0.842217
| 0.82892
| 0.817228
| 0
| 0.002751
| 0.466561
| 40,880
| 1,141
| 107
| 35.828221
| 0.797359
| 0.32248
| 0
| 0.662304
| 1
| 0
| 0.21037
| 0.04233
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013089
| false
| 0
| 0.018325
| 0
| 0.044503
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4d4dd62fa943b361403572f6c7b5c4cdc03bb706
| 131
|
py
|
Python
|
spyns/lattice/__init__.py
|
datamaterials/spyns
|
68e8412ba003e2d882373db93f322497be7bff93
|
[
"MIT"
] | 9
|
2019-12-06T06:54:04.000Z
|
2022-03-14T00:16:47.000Z
|
spyns/lattice/__init__.py
|
jkglasbrenner/spyns
|
68e8412ba003e2d882373db93f322497be7bff93
|
[
"MIT"
] | 1
|
2018-10-31T16:41:07.000Z
|
2018-11-19T21:19:56.000Z
|
spyns/lattice/__init__.py
|
datamaterials/spyns
|
68e8412ba003e2d882373db93f322497be7bff93
|
[
"MIT"
] | 2
|
2019-12-06T06:06:45.000Z
|
2020-02-12T11:35:30.000Z
|
# -*- coding: utf-8 -*-
from spyns.lattice.lattice import Lattice
import spyns.lattice.generate
import spyns.lattice.neighborhood
| 21.833333
| 41
| 0.778626
| 17
| 131
| 6
| 0.529412
| 0.352941
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008547
| 0.10687
| 131
| 5
| 42
| 26.2
| 0.863248
| 0.160305
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
4d7ca01edc479797114d76925f3dabfbc626f7df
| 80
|
py
|
Python
|
initialrepo/main.py
|
naik-jay/initialrepo
|
03201f4e15a0fa5d7a99ddb3f52345d5a19cf960
|
[
"Apache-2.0"
] | null | null | null |
initialrepo/main.py
|
naik-jay/initialrepo
|
03201f4e15a0fa5d7a99ddb3f52345d5a19cf960
|
[
"Apache-2.0"
] | null | null | null |
initialrepo/main.py
|
naik-jay/initialrepo
|
03201f4e15a0fa5d7a99ddb3f52345d5a19cf960
|
[
"Apache-2.0"
] | null | null | null |
def _print_main():
print("main! successfully imported package from github")
| 26.666667
| 60
| 0.75
| 10
| 80
| 5.8
| 0.8
| 0.310345
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 80
| 2
| 61
| 40
| 0.852941
| 0
| 0
| 0
| 0
| 0
| 0.5875
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0.5
| 0
| 1
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 8
|
4d9c2163a2508ce1b21a0e6bfd88b0ead87e1f9b
| 169
|
py
|
Python
|
activityio/_types/__init__.py
|
moritzhoferer/activityio
|
c24526d967a6de535c60f29846f17f81bf8fdfaf
|
[
"MIT"
] | 11
|
2018-05-07T09:56:15.000Z
|
2020-12-20T16:47:01.000Z
|
activityio/_types/__init__.py
|
moritzhoferer/activityio
|
c24526d967a6de535c60f29846f17f81bf8fdfaf
|
[
"MIT"
] | null | null | null |
activityio/_types/__init__.py
|
moritzhoferer/activityio
|
c24526d967a6de535c60f29846f17f81bf8fdfaf
|
[
"MIT"
] | 5
|
2018-11-08T14:13:31.000Z
|
2020-12-27T20:42:56.000Z
|
from activityio._types.base import *
from activityio._types import columns as special_columns
from activityio._types.activitydata import ActivityData # important! last
| 42.25
| 74
| 0.846154
| 21
| 169
| 6.619048
| 0.52381
| 0.302158
| 0.410072
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106509
| 169
| 3
| 75
| 56.333333
| 0.92053
| 0.088757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
12ce0a948035e60f01c3389a8a8882f44476b6e9
| 25,148
|
py
|
Python
|
backend/tests/unittests/metric_source/checkmarx_tests.py
|
ICTU/quality-report
|
f6234e112228ee7cfe6476c2d709fe244579bcfe
|
[
"Apache-2.0"
] | 25
|
2016-11-25T10:41:24.000Z
|
2021-07-03T14:02:49.000Z
|
backend/tests/unittests/metric_source/checkmarx_tests.py
|
ICTU/quality-report
|
f6234e112228ee7cfe6476c2d709fe244579bcfe
|
[
"Apache-2.0"
] | 783
|
2016-09-19T12:10:21.000Z
|
2021-01-04T20:39:15.000Z
|
backend/tests/unittests/metric_source/checkmarx_tests.py
|
ICTU/quality-report
|
f6234e112228ee7cfe6476c2d709fe244579bcfe
|
[
"Apache-2.0"
] | 15
|
2015-03-25T13:52:49.000Z
|
2021-03-08T17:17:56.000Z
|
"""
Copyright 2012-2019 Ministerie van Sociale Zaken en Werkgelegenheid
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
import ssl
import logging
import datetime
import xml.etree.ElementTree
import urllib.error
from typing import List
import unittest
from unittest.mock import patch, call, MagicMock
import requests
from hqlib.metric_source import Checkmarx, url_opener
PROJECTS = '[{"id": 11, "name": "metric_source_id"}, {"id": 22, "name": "id2"}]'
LAST_SCAN = '[{"id": 10111, "dateAndTime": {"finishedOn": "2017-10-24T20:00:47.553"}}]'
STATISTICS = '{"highSeverity": 4, "mediumSeverity": 7}'
SAST_REPORT = '''<?xml version="1.0" encoding="utf-8"?>
<CxXMLResults>
<Query id="789" name="Reflected_XSS" group="JScript_Vulnerabilities" Severity="{severity}" QueryVersionCode="842956">
<Result Status="Recurrent" FalsePositive="{false_positive}" >
</Result>
</Query>
</CxXMLResults>
'''
class CheckmarxIssueTest(unittest.TestCase):
""" Unit tests for Issue class. """
def test_issue(self):
""" Test if issue is created correctly. """
issue = Checkmarx.Issue('a_group', 'the_name', 'http://url', 3, 'New')
self.assertEqual('a group', issue.group)
self.assertEqual('the name', issue.title)
self.assertEqual('http://url', issue.display_url)
self.assertEqual(3, issue.count)
self.assertEqual('New', issue.status)
class CheckmarxConstructorTest(unittest.TestCase):
""" Unit tests for constructor of Checkmarx class. """
# pylint: disable=too-many-public-methods
def setUp(self):
# pylint: disable=protected-access
Checkmarx._Checkmarx__retrieve_access_token.cache_clear()
@patch.object(logging, 'error')
@patch.object(url_opener.UrlOpener, 'url_read')
def test_checkmarx_init(self, mock_url_read, mock_error):
""" Test that initialization of checkmarx goes correctly. """
mock_url_read.return_value = '{"access_token": "abc123"}'
marx = Checkmarx(url='http://url', username='un', password='pwd') # nosec
self.assertIsNotNone(marx)
mock_url_read.assert_called_once_with(
'http://url/cxrestapi/auth/identity/connect/token',
post_body=b'username=un&password=pwd&scope=sast_rest_api&grant_type=password&'
b'client_id=resource_owner_client&client_secret=014DF517-39D1-4453-B7B3-9930C563627C')
mock_error.assert_not_called()
@patch('ssl._create_unverified_context')
@patch.object(url_opener.UrlOpener, 'url_read')
def test_checkmarx_init_no_ssl(self, mock_url_read, mock_create_unverified_context):
""" Test that initialization of checkmarx goes correctly without ssl. """
# pylint: disable=protected-access
delattr(ssl, '_create_unverified_context')
mock_url_read.return_value = '{"access_token": "abc123"}'
marx = Checkmarx(url='http://url', username='un', password='pwd') # nosec
self.assertIsNotNone(marx)
self.assertFalse(hasattr(ssl, '_create_unverified_context'))
self.assertTrue(hasattr(ssl, '_create_default_https_context'))
mock_create_unverified_context.assert_not_called()
@patch.object(logging, 'error')
@patch.object(url_opener.UrlOpener, 'url_read')
def test_checkmarx_init_http_error(self, mock_url_read, mock_error):
""" Test initialization of checkmarx when http error occures. """
mock_url_read.side_effect = urllib.error.HTTPError('raise', None, None, None, None)
marx = Checkmarx(url='http://url', username='un', password='pwd') # nosec
self.assertIsNotNone(marx)
mock_url_read.assert_called_once_with(
'http://url/cxrestapi/auth/identity/connect/token',
post_body=b'username=un&password=pwd&scope=sast_rest_api&grant_type=password&'
b'client_id=resource_owner_client&client_secret=014DF517-39D1-4453-B7B3-9930C563627C')
mock_error.assert_called_once_with("HTTP error during the retrieving of access token!")
@patch.object(logging, 'error')
@patch.object(url_opener.UrlOpener, 'url_read')
def test_checkmarx_init_invalid_json_error(self, mock_url_read, mock_error):
""" Test initialization of checkmarx with invalid json response. """
mock_url_read.return_value = 'non-json'
marx = Checkmarx(url='http://url', username='un', password='pwd') # nosec
self.assertIsNotNone(marx)
mock_url_read.assert_called_once_with(
'http://url/cxrestapi/auth/identity/connect/token',
post_body=b'username=un&password=pwd&scope=sast_rest_api&grant_type=password&'
b'client_id=resource_owner_client&client_secret=014DF517-39D1-4453-B7B3-9930C563627C')
self.assertEqual(mock_error.call_args[0][0], "Couldn't load access token from json: %s.")
self.assertIsInstance(mock_error.call_args[0][1], ValueError)
@patch.object(logging, 'error')
@patch.object(url_opener.UrlOpener, 'url_read')
def test_checkmarx_init_key_missing_error(self, mock_url_read, mock_error):
""" Test initialization of checkmarx with invalid json response. """
# pylint: disable=protected-access
mock_url_read.return_value = '{}'
marx = Checkmarx(url='http://url', username='un', password='pwd') # nosec
self.assertIsNotNone(marx)
mock_url_read.assert_called_once_with(
'http://url/cxrestapi/auth/identity/connect/token',
post_body=b'username=un&password=pwd&scope=sast_rest_api&grant_type=password&'
b'client_id=resource_owner_client&client_secret=014DF517-39D1-4453-B7B3-9930C563627C')
self.assertEqual(mock_error.call_args[0][0], "Couldn't load access token from json: %s.")
self.assertIsInstance(mock_error.call_args[0][1], KeyError)
self.assertEqual(ssl._create_default_https_context, ssl._create_unverified_context)
@patch.object(url_opener.UrlOpener, 'url_read')
@patch.object(requests, 'delete')
class CheckmarxTest(unittest.TestCase):
""" Unit tests for the Checkmarx class. """
# pylint: disable=too-many-public-methods
def setUp(self):
time.sleep = MagicMock()
# pylint: disable=protected-access
Checkmarx._fetch_project_id.cache_clear()
with patch.object(url_opener.UrlOpener, 'url_read', return_value='{"access_token": "abc123"}'):
self.__report = Checkmarx('http://url', 'username', 'password')
def test_high_risk_warnings(self, mock_delete, mock_url_read):
""" Test the number of high risk warnings. """
mock_url_read.side_effect = [PROJECTS, LAST_SCAN, STATISTICS]
self.assertEqual(4, self.__report.nr_warnings(['metric_source_id'], 'high'))
mock_delete.assert_not_called()
self.assertEqual(mock_url_read.call_args_list[0][0][0], 'http://url/CxRestAPI/projects')
self.assertEqual(mock_url_read.call_args_list[1][0][0],
'http://url/CxRestAPI/sast/scans?projectId=11&last=1&scanStatus=7')
self.assertEqual(mock_url_read.call_args_list[2][0][0],
'http://url/CxRestAPI/sast/scans/10111/resultsStatistics')
@patch.object(logging, 'error')
def test_nr_warnings_no_project(self, mock_error, mock_delete, mock_url_read):
""" Test the number of high risk warnings. """
mock_url_read.return_value = PROJECTS
self.assertEqual(-1, self.__report.nr_warnings(['unknown_proj_id'], 'high'))
mock_delete.assert_not_called()
mock_error.assert_called_once_with("Error: no project id found for project with name '%s'.", 'unknown_proj_id')
def test_obtain_issues(self, mock_delete, mock_url_read):
""" Test that issues are correctly obtained. """
mock_url_read.side_effect = [PROJECTS, LAST_SCAN, '{"reportId": 22}', '{"status": {"value": "Created"}}',
SAST_REPORT.format(false_positive=False, severity='High')]
self.__report.obtain_issues(['metric_source_id'], 'high')
issues = self.__report.issues()
self.assertIsInstance(issues, List)
self.assertIsInstance(issues[0], Checkmarx.Issue)
self.assertEqual('JScript Vulnerabilities', issues[0].group)
self.assertEqual('Reflected XSS', issues[0].title)
self.assertEqual('http://url/CxWebClient/ScanQueryDescription.aspx?queryID=789&'
'queryVersionCode=842956&queryTitle=Reflected_XSS', issues[0].display_url)
mock_delete.assert_called_once_with('http://url/CxRestAPI/reports/sastScan/22',
headers={'Authorization': 'Bearer abc123'})
self.assertEqual(1, issues[0].count)
self.assertEqual("Recurrent", issues[0].status)
self.assertEqual(mock_url_read.call_args_list[0][0][0], 'http://url/CxRestAPI/projects')
self.assertEqual(mock_url_read.call_args_list[1][0][0],
'http://url/CxRestAPI/sast/scans?projectId=11&last=1&scanStatus=7')
self.assertEqual(mock_url_read.call_args_list[2][0][0], 'http://url/CxRestAPI/reports/sastScan')
self.assertEqual(mock_url_read.call_args_list[3][0][0], 'http://url/CxRestAPI/reports/sastScan/22/status')
self.assertEqual(mock_url_read.call_args_list[4][0][0], 'http://url/CxRestAPI/reports/sastScan/22')
@patch.object(logging, 'error')
def test_obtain_issues_xml_error(self, mock_error, mock_delete, mock_url_read):
""" Test that issues are correctly obtained. """
mock_url_read.side_effect = \
[PROJECTS, LAST_SCAN, '{"reportId": 22}', '{"status": {"value": "Created"}}', 'not-an-xml']
self.__report.obtain_issues(['metric_source_id'], 'high')
issues = self.__report.issues()
mock_delete.assert_called_once_with('http://url/CxRestAPI/reports/sastScan/22',
headers={'Authorization': 'Bearer abc123'})
self.assertIsInstance(issues, List)
self.assertEqual(len(issues), 0)
self.assertEqual(mock_error.call_args[0][0], "Error in checkmarx report xml: %s.")
self.assertIsInstance(mock_error.call_args[0][1], xml.etree.ElementTree.ParseError)
@patch.object(logging, 'error')
def test_obtain_issues_sast_report_not_created(self, mock_error, mock_delete, mock_url_read):
""" Test that no issues are returned when the SAST report is not created. """
mock_url_read.side_effect = [PROJECTS, LAST_SCAN, '{"reportId": 22}'] + \
['{"status": {"value": "InProgress"}}'] * 10
self.__report.obtain_issues(['metric_source_id'], 'high')
issues = self.__report.issues()
mock_delete.assert_not_called()
self.assertIsInstance(issues, List)
self.assertEqual(len(issues), 0)
mock_error.assert_called_once_with("SAST report is not created on the Checkmarx server!")
@patch.object(logging, 'error')
def test_obtain_issues_xml_tag_error(self, mock_error, mock_delete, mock_url_read):
""" Test that issues are correctly obtained. """
mock_url_read.side_effect = [PROJECTS, LAST_SCAN, '{"reportId": 22}', '{"status": {"value": "Created"}}',
'<CxXMLResults><Query /></CxXMLResults>']
self.__report.obtain_issues(['metric_source_id'], 'high')
issues = self.__report.issues()
mock_delete.assert_called_once_with('http://url/CxRestAPI/reports/sastScan/22',
headers={'Authorization': 'Bearer abc123'})
self.assertIsInstance(issues, List)
self.assertEqual(len(issues), 0)
self.assertEqual(mock_error.call_args[0][0], "Tag %s could not be found.")
self.assertIsInstance(mock_error.call_args[0][1], KeyError)
def test_obtain_issues_exclude_false_positives(self, mock_delete, mock_url_read):
""" Test that issues are omitted when false positive. """
mock_url_read.side_effect = [PROJECTS, LAST_SCAN, '{"reportId": 22}', '{"status": {"value": "Created"}}',
SAST_REPORT.format(false_positive=True, severity='High')]
self.__report.obtain_issues(['metric_source_id'], 'high')
issues = self.__report.issues()
mock_delete.assert_called_once_with('http://url/CxRestAPI/reports/sastScan/22',
headers={'Authorization': 'Bearer abc123'})
self.assertIsInstance(issues, List)
self.assertEqual(len(issues), 0)
def test_obtain_issues_exclude_wrong_severity(self, mock_delete, mock_url_read):
""" Test that issues are omitted when severity does not match. """
mock_url_read.side_effect = [PROJECTS, LAST_SCAN, '{"reportId": 22}', '{"status": {"value": "Created"}}',
SAST_REPORT.format(false_positive=False, severity='Low')]
self.__report.obtain_issues(['metric_source_id'], 'high')
issues = self.__report.issues()
mock_delete.assert_called_once_with('http://url/CxRestAPI/reports/sastScan/22',
headers={'Authorization': 'Bearer abc123'})
self.assertIsInstance(issues, List)
self.assertEqual(len(issues), 0)
def test_obtain_issues_no_query(self, mock_delete, mock_url_read):
""" Test that issues are omitted when there is no query. """
mock_url_read.side_effect = \
[PROJECTS, LAST_SCAN, '{"reportId": 22}', '{"status": {"value": "Created"}}', '<CxXMLResults />']
self.__report.obtain_issues(['metric_source_id'], 'high')
issues = self.__report.issues()
mock_delete.assert_called_once_with('http://url/CxRestAPI/reports/sastScan/22',
headers={'Authorization': 'Bearer abc123'})
self.assertIsInstance(issues, List)
self.assertEqual(len(issues), 0)
def test_obtain_issues_http_error(self, mock_delete, mock_url_read):
""" Test that issues are omitted when http error occurs. """
mock_url_read.side_effect = urllib.error.HTTPError('raise', None, None, None, None)
self.__report.obtain_issues(['metric_source_id'], 'high')
issues = self.__report.issues()
mock_delete.assert_not_called()
self.assertIsInstance(issues, List)
self.assertEqual(len(issues), 0)
@patch.object(logging, 'error')
def test_obtain_issues_response_error(self, mock_error, mock_delete, mock_url_read):
""" Test that issues are omitted when json error occurs. """
mock_url_read.return_value = 'non-json'
self.__report.obtain_issues(['metric_source_id'], 'high')
issues = self.__report.issues()
mock_delete.assert_not_called()
self.assertIsInstance(issues, List)
self.assertEqual(len(issues), 0)
self.assertEqual(mock_error.call_args[0][0], "Error loading json: %s.")
self.assertIsInstance(mock_error.call_args[0][1], ValueError)
@patch.object(logging, 'error')
def test_obtain_issues_index_error(self, mock_error, mock_delete, mock_url_read):
""" Test that issues are omitted when json contains nothing. """
mock_url_read.side_effect = [PROJECTS, '[]']
self.__report.obtain_issues(['metric_source_id'], 'high')
issues = self.__report.issues()
mock_delete.assert_not_called()
self.assertIsInstance(issues, List)
self.assertEqual(len(issues), 0)
self.assertEqual(mock_error.call_args[0], ("There are still no scans for project %s.", 'metric_source_id'))
@patch.object(logging, 'error')
def test_obtain_issues_json_error(self, mock_error, mock_delete, mock_url_read):
""" Test that issues are omitted when json error occurs. """
mock_url_read.side_effect = [PROJECTS, '{}']
self.__report.obtain_issues(['metric_source_id'], 'high')
issues = self.__report.issues()
mock_delete.assert_not_called()
self.assertIsInstance(issues, List)
self.assertEqual(len(issues), 0)
self.assertEqual(mock_error.call_args[0][0], "Tag %s could not be found.")
self.assertIsInstance(mock_error.call_args[0][1], KeyError)
def test_medium_risk_warnings(self, mock_delete, mock_url_read):
""" Test the number of medium risk warnings. """
mock_url_read.side_effect = [PROJECTS, LAST_SCAN, STATISTICS]
self.assertEqual(7, self.__report.nr_warnings(['metric_source_id'], 'medium'))
mock_delete.assert_not_called()
def test_passed_raise(self, mock_delete, mock_url_read):
""" Test that the value is -1 when the report can't be opened. """
mock_url_read.side_effect = urllib.error.HTTPError('raise', None, None, None, None)
self.assertEqual(-1, self.__report.nr_warnings(['raise'], 'high'))
mock_url_read.assert_called_once_with('http://url/CxRestAPI/projects')
mock_delete.assert_not_called()
def test_multiple_urls(self, mock_delete, mock_url_read):
""" Test the number of alerts for multiple urls. """
mock_url_read.side_effect = [PROJECTS, LAST_SCAN, STATISTICS, PROJECTS, '[{"id": 202222}]', STATISTICS]
self.assertEqual(14, self.__report.nr_warnings(['metric_source_id', 'id2'], 'medium'))
self.assertEqual([
call('http://url/CxRestAPI/projects'),
call('http://url/CxRestAPI/sast/scans?projectId=11&last=1&scanStatus=7'),
call('http://url/CxRestAPI/sast/scans/10111/resultsStatistics'),
call('http://url/CxRestAPI/projects'),
call('http://url/CxRestAPI/sast/scans?projectId=22&last=1&scanStatus=7'),
call('http://url/CxRestAPI/sast/scans/202222/resultsStatistics')
], mock_url_read.call_args_list)
mock_delete.assert_not_called()
def test_metric_source_urls_without_report(self, mock_delete, mock_url_read):
""" Test the metric source urls without metric ids. """
mock_url_read.return_value = None
self.assertEqual([], self.__report.metric_source_urls())
mock_delete.assert_not_called()
def test_metric_source_urls(self, mock_delete, mock_url_read):
""" Test the metric source urls with one metric id. """
mock_url_read.side_effect = [PROJECTS, LAST_SCAN]
self.assertEqual(['http://url/CxWebClient/ViewerMain.aspx?scanId=10111&ProjectID=22'],
self.__report.metric_source_urls('id2'))
mock_delete.assert_not_called()
@patch.object(logging, 'error')
def test_metric_source_urls_key_error(self, mock_error, mock_delete, mock_url_read):
""" Test the metric source urls with empty scan response.. """
mock_url_read.side_effect = [PROJECTS, '{}']
self.assertEqual(["http://url/"], self.__report.metric_source_urls('id2'))
self.assertEqual(mock_error.call_args_list[0][0][0], "Couldn't load values from json: %s - %s")
self.assertEqual(mock_error.call_args_list[0][0][1], 'id2')
self.assertIsInstance(mock_error.call_args_list[0][0][2], KeyError)
mock_delete.assert_not_called()
@patch.object(logging, 'error')
def test_metric_source_urls_index_error(self, mock_error, mock_delete, mock_url_read):
""" Test the metric source urls with empty scan response.. """
mock_url_read.side_effect = [PROJECTS, '[]']
self.assertEqual(["http://url/"], self.__report.metric_source_urls('id2'))
self.assertEqual(mock_error.call_args_list[0][0][0], "Couldn't load values from json: %s - %s")
self.assertEqual(mock_error.call_args_list[0][0][1], 'id2')
self.assertIsInstance(mock_error.call_args_list[0][0][2], IndexError)
mock_delete.assert_not_called()
def test_metric_source_urls_on_error(self, mock_delete, mock_url_read):
""" Test the metric source urls when an error occurs. """
mock_url_read.side_effect = [PROJECTS, urllib.error.HTTPError(None, None, None, None, None)]
self.assertEqual(["http://url/"], self.__report.metric_source_urls('id2'))
self.assertEqual([
call('http://url/CxRestAPI/projects'),
call('http://url/CxRestAPI/sast/scans?projectId=22&last=1&scanStatus=7')
], mock_url_read.call_args_list)
mock_delete.assert_not_called()
def test_url(self, mock_delete, mock_url_read):
""" Test the metric source base url. """
mock_url_read.return_value = LAST_SCAN
self.assertEqual("http://url/", self.__report.url())
mock_delete.assert_not_called()
def test_datetime(self, mock_delete, mock_url_read):
""" Test the date and time of the report. """
mock_url_read.side_effect = [PROJECTS, LAST_SCAN]
self.assertEqual(datetime.datetime(2017, 10, 24, 20, 0, 47), self.__report.datetime('id2'))
self.assertEqual([
call('http://url/CxRestAPI/projects'),
call('http://url/CxRestAPI/sast/scans?projectId=22&last=1&scanStatus=7')
], mock_url_read.call_args_list)
mock_delete.assert_not_called()
def test_datetime_http_error(self, mock_delete, mock_url_read):
""" Test the date and time of the report. """
mock_url_read.side_effect = [PROJECTS, urllib.error.HTTPError(None, None, None, None, None)]
self.assertEqual(datetime.datetime.min, self.__report.datetime('id2'))
self.assertEqual([
call('http://url/CxRestAPI/projects'),
call('http://url/CxRestAPI/sast/scans?projectId=22&last=1&scanStatus=7')
], mock_url_read.call_args_list)
mock_delete.assert_not_called()
@patch.object(logging, 'error')
def test_datetime_missing(self, mock_error, mock_delete, mock_url_read):
""" Test a missing date and time of the report. """
mock_url_read.side_effect = [PROJECTS, '[{"id": 202222}]']
self.assertEqual(datetime.datetime.min, self.__report.datetime('id2'))
self.assertEqual(mock_error.call_args_list[0][0][0], "Couldn't parse date and time for project %s from %s: %s")
self.assertEqual(mock_error.call_args_list[0][0][1], 'id2')
self.assertEqual(mock_error.call_args_list[0][0][2], 'http://url/')
self.assertIsInstance(mock_error.call_args_list[0][0][3], KeyError)
mock_delete.assert_not_called()
@patch.object(logging, 'error')
def test_datetime_empty_scan(self, mock_error, mock_delete, mock_url_read):
""" Test a missing scan data. """
mock_url_read.side_effect = [PROJECTS, '[]']
self.assertEqual(datetime.datetime.min, self.__report.datetime('id2'))
self.assertEqual(mock_error.call_args_list[0][0][0], "Couldn't parse date and time for project %s from %s: %s")
self.assertEqual(mock_error.call_args_list[0][0][1], 'id2')
self.assertEqual(mock_error.call_args_list[0][0][2], 'http://url/')
self.assertIsInstance(mock_error.call_args_list[0][0][3], IndexError)
mock_delete.assert_not_called()
@patch.object(logging, 'error')
def test_datetime_format_error(self, mock_error, mock_delete, mock_url_read):
""" Test a invalid date and time of the report. """
mock_url_read.side_effect = [PROJECTS, '[{"id": 3, "dateAndTime": {"finishedOn": "2017-40-24T20:00:47.553"}}]']
self.assertEqual(datetime.datetime.min, self.__report.datetime('id2'))
self.assertEqual(mock_error.call_args_list[0][0][0], "Couldn't parse date and time for project %s from %s: %s")
self.assertEqual(mock_error.call_args_list[0][0][1], 'id2')
self.assertEqual(mock_error.call_args_list[0][0][2], 'http://url/')
self.assertIsInstance(mock_error.call_args_list[0][0][3], ValueError)
mock_delete.assert_not_called()
@patch.object(logging, 'error')
def test_nr_warnings_on_missing_values(self, mock_error, mock_delete, mock_url_read):
""" Test dealing with empty list of values. """
mock_url_read.side_effect = [PROJECTS, '{}']
self.assertEqual(-1, self.__report.nr_warnings(['id2'], 'medium'))
self.assertEqual(mock_error.call_args_list[0][0][0],
"Couldn't parse alerts for project %s with %s risk level from %s: %s")
self.assertEqual(mock_error.call_args_list[0][0][1], 'id2')
self.assertEqual(mock_error.call_args_list[0][0][2], 'medium')
self.assertEqual(mock_error.call_args_list[0][0][3], 'http://url/')
self.assertIsInstance(mock_error.call_args_list[0][0][4], KeyError)
mock_delete.assert_not_called()
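# --- Pattern sketch (not part of the test module above) -----------------------
# The tests above lean on one idiom: patch.object swaps out the network call,
# side_effect feeds one canned payload per call, and assert_called_once_with
# pins the URL. A self-contained miniature of that idiom; Fetcher is a
# hypothetical stand-in for url_opener.UrlOpener, not part of hqlib.
import json
import unittest
from unittest.mock import patch


class Fetcher:
    def url_read(self, url):
        raise RuntimeError("tests never hit the network")


def nr_projects(fetcher):
    return len(json.loads(fetcher.url_read("http://url/CxRestAPI/projects")))


class FetcherTest(unittest.TestCase):
    @patch.object(Fetcher, "url_read")
    def test_nr_projects(self, mock_url_read):
        # One element per expected call, like PROJECTS / LAST_SCAN / STATISTICS.
        mock_url_read.side_effect = ['[{"id": 11}, {"id": 22}]']
        self.assertEqual(2, nr_projects(Fetcher()))
        mock_url_read.assert_called_once_with("http://url/CxRestAPI/projects")


if __name__ == "__main__":
    unittest.main()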
| 53.054852
| 119
| 0.67397
| 3,234
| 25,148
| 4.97248
| 0.101422
| 0.039177
| 0.056775
| 0.038057
| 0.811517
| 0.781917
| 0.769417
| 0.731173
| 0.700578
| 0.681923
| 0
| 0.023462
| 0.191546
| 25,148
| 473
| 120
| 53.167019
| 0.767498
| 0.103507
| 0
| 0.597668
| 0
| 0.03207
| 0.227169
| 0.040933
| 0
| 0
| 0
| 0
| 0.41691
| 1
| 0.104956
| false
| 0.03207
| 0.03207
| 0
| 0.145773
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
420af510edeea16dff7213302927b0c439ba6455
| 110
|
py
|
Python
|
tolteca/datamodels/io/toltec/__init__.py
|
dennis-l/tolteca
|
1dffaffb585eb7027e26b34ae01e8632bef134cb
|
[
"BSD-3-Clause"
] | 2
|
2021-09-28T18:51:37.000Z
|
2021-12-28T00:25:51.000Z
|
tolteca/datamodels/io/toltec/__init__.py
|
dennis-l/tolteca
|
1dffaffb585eb7027e26b34ae01e8632bef134cb
|
[
"BSD-3-Clause"
] | 2
|
2021-11-04T22:32:03.000Z
|
2022-01-11T21:40:34.000Z
|
tolteca/datamodels/io/toltec/__init__.py
|
dennis-l/tolteca
|
1dffaffb585eb7027e26b34ae01e8632bef134cb
|
[
"BSD-3-Clause"
] | 2
|
2021-07-23T14:00:51.000Z
|
2021-07-27T15:34:48.000Z
|
#! /usr/bin/env python
from .kidsdata import * # noqa: F401, F403
from .table import * # noqa: F401, F403
| 18.333333
| 43
| 0.654545
| 16
| 110
| 4.5
| 0.6875
| 0.277778
| 0.388889
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 0.209091
| 110
| 5
| 44
| 22
| 0.689655
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
423d92907b9af07a350dc39e2c3296a0d2432a9d
| 3,376
|
py
|
Python
|
pipe_anchorages/schema/port_visit.py
|
GlobalFishingWatch/anchorages_pipeline
|
88764545b693bfb65fc7a7f62a344fb2afbc3d97
|
[
"Apache-2.0"
] | 3
|
2017-12-22T10:19:15.000Z
|
2020-04-20T10:28:43.000Z
|
pipe_anchorages/schema/port_visit.py
|
GlobalFishingWatch/anchorages_pipeline
|
88764545b693bfb65fc7a7f62a344fb2afbc3d97
|
[
"Apache-2.0"
] | 32
|
2017-12-06T13:01:46.000Z
|
2022-03-30T22:52:04.000Z
|
pipe_anchorages/schema/port_visit.py
|
GlobalFishingWatch/anchorages_pipeline
|
88764545b693bfb65fc7a7f62a344fb2afbc3d97
|
[
"Apache-2.0"
] | 3
|
2018-01-21T14:07:58.000Z
|
2021-07-28T16:02:20.000Z
|
from .utils import SchemaBuilder
from .port_event import build as build_port_event_schema
def build():
builder = SchemaBuilder()
builder.add("visit_id", "STRING",
description="Unique ID for this visit")
builder.add("vessel_id", "STRING",
description="`vessel_id` of the track this visit was found on")
builder.add("ssvid", "STRING",
description="`ssvid` of the vessel involved in the visit."
"N.B. Some `ssvid` may be associated with multiple tracks")
builder.add("start_timestamp", "TIMESTAMP",
description="timestamp at which vessel crossed into the anchorage")
builder.add("start_lat", "FLOAT",
description="latitude of vessel at `start_timestamp`")
builder.add("start_lon", "FLOAT",
description="longitude of vessel at `start_timestamp`")
builder.add("start_anchorage_id", "STRING",
description="`anchorage_id` of anchorage where vessel entered port")
builder.add("end_timestamp", "TIMESTAMP",
description="timestamp at which vessel crossed out the anchorage.")
builder.add("end_lat", "FLOAT",
description="latitude of vessel at `end_timestamp`")
builder.add("end_lon", "FLOAT",
description="longitude of vessel at `end_timestamp`")
builder.add('duration_hrs', "FLOAT",
description='duration of visit in hours')
builder.add("end_anchorage_id", "STRING",
description="longitude of vessel at `end_timestamp`")
builder.add("confidence", "INTEGER",
description="""How confident are we that this is a real visit based on components of the visits:
1 -> no stop or gap; only an entry and/or exit
2 -> only stop and/or gap; no entry or exit
3 -> port entry or exit with stop and/or gap
4 -> port entry and exit with stop and/or gap"""
)
builder.add("events", mode="REPEATED",
schema_type=build_port_event_schema().fields,
description="sequence of port events that occurred during visit"
)
return builder.schema
def build_compatibility():
builder = SchemaBuilder()
builder.add("visit_id", "STRING",
description="Unique ID for this visit")
builder.add("vessel_id", "STRING",
description="`vessel_id` of the track this visit was found on")
builder.add("start_timestamp", "TIMESTAMP",
description="timestamp at which vessel crossed into the anchorage")
builder.add("start_lat", "FLOAT",
description="latitude of vessel at `start_timestamp`")
builder.add("start_lon", "FLOAT",
description="longitude of vessel at `start_timestamp`")
builder.add("start_anchorage_id", "STRING",
description="`anchorage_id` of anchorage where vessel entered port")
builder.add("end_timestamp", "TIMESTAMP",
description="timestamp at which vessel crossed out the anchorage.")
builder.add("end_lat", "FLOAT",
description="latitude of vessel at `end_timestamp`")
builder.add("end_lon", "FLOAT",
description="longitude of vessel at `end_timestamp`")
builder.add("end_anchorage_id", "STRING",
description="longitude of vessel at `end_timestamp`")
builder.add("events", mode="REPEATED",
schema_type=build_port_event_schema().fields,
description="sequence of port events that occurred during visit"
)
return builder.schema
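# --- Illustration (not part of the schema module above) -----------------------
# SchemaBuilder lives in .utils and is not shown here; the class below is a
# hypothetical minimal stand-in that illustrates the builder pattern build()
# relies on: add() appends one BigQuery-style field record (the "name"/"type"/
# "mode"/"description"/"fields" keys are an assumption about the real class),
# and .schema exposes the accumulated list.
class MinimalSchemaBuilder:
    def __init__(self):
        self.schema = []

    def add(self, name, schema_type="STRING", mode="NULLABLE", description=""):
        field = {"name": name, "mode": mode, "description": description}
        if isinstance(schema_type, list):
            # Nested RECORD, as used for the repeated "events" column above.
            field["type"] = "RECORD"
            field["fields"] = schema_type
        else:
            field["type"] = schema_type
        self.schema.append(field)


_builder = MinimalSchemaBuilder()
_builder.add("visit_id", "STRING", description="Unique ID for this visit")
_builder.add("events", mode="REPEATED",
             schema_type=[{"name": "event_type", "type": "STRING"}])
assert _builder.schema[1]["type"] == "RECORD"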
| 44.421053
| 104
| 0.682464
| 429
| 3,376
| 5.249417
| 0.216783
| 0.111012
| 0.044405
| 0.0746
| 0.808171
| 0.808171
| 0.790409
| 0.790409
| 0.790409
| 0.790409
| 0
| 0.001487
| 0.203199
| 3,376
| 75
| 105
| 45.013333
| 0.835688
| 0
| 0
| 0.735294
| 0
| 0
| 0.523104
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.029412
| 0
| 0.088235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
429b2710f937b9d294b54dbc02eb23fc72eb5874
| 2,808
|
py
|
Python
|
tzager/get_connections.py
|
tzagerAI/tzager
|
a6787f02fde58babd9999867d2cc3ced94926da8
|
[
"MIT"
] | 2
|
2021-01-25T17:05:59.000Z
|
2021-04-11T19:05:16.000Z
|
tzager/get_connections.py
|
tzagerAI/tzager
|
a6787f02fde58babd9999867d2cc3ced94926da8
|
[
"MIT"
] | null | null | null |
tzager/get_connections.py
|
tzagerAI/tzager
|
a6787f02fde58babd9999867d2cc3ced94926da8
|
[
"MIT"
] | null | null | null |
import json
import requests
_BASE_URL = 'https://intoolab.ai'
def _fetch(endpoint, password, concepts_list, pmids, filters):
    # Shared POST helper for all connection lookups. The payload is JSON-encoded
    # twice (json.dumps inside requests' json=); this matches the original calls
    # and is kept as-is in case the service expects a JSON string body.
    response = requests.post(_BASE_URL + '/' + endpoint + '/' + password, json=json.dumps({'concepts_list': concepts_list, 'pmids': pmids, 'filters': filters}))
    if response.status_code == 200:
        return dict(response.json())
    return {'error': response.status_code}
def anatomies(password, concepts_list, pmids=[], filters=[]):
    return _fetch('get_anatomies', password, concepts_list, pmids, filters)
def diseases(password, concepts_list, pmids=[], filters=[]):
    return _fetch('get_diseases', password, concepts_list, pmids, filters)
def symptoms(password, concepts_list, pmids=[], filters=[]):
    return _fetch('get_symptoms', password, concepts_list, pmids, filters)
def organisms(password, concepts_list, pmids=[], filters=[]):
    return _fetch('get_organisms', password, concepts_list, pmids, filters)
def therapies(password, concepts_list, pmids=[], filters=[]):
    return _fetch('get_therapies', password, concepts_list, pmids, filters)
def phenomena(password, concepts_list, pmids=[], filters=[]):
    return _fetch('get_phenomena', password, concepts_list, pmids, filters)
def genes(password, concepts_list, pmids=[], filters=[]):
    return _fetch('get_genes', password, concepts_list, pmids, filters)
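# Hypothetical usage sketch (the credential and concept values below are
# placeholders, not part of the original module):
#   result = diseases('my-api-password', ['inflammation'], pmids=['31452104'])
#   if 'error' in result:
#       pass  # non-200 response; result['error'] holds the HTTP status code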
| 43.2
| 163
| 0.657407
| 333
| 2,808
| 5.417417
| 0.096096
| 0.13969
| 0.131929
| 0.097007
| 0.92184
| 0.92184
| 0.92184
| 0.92184
| 0.92184
| 0.92184
| 0
| 0.009154
| 0.183048
| 2,808
| 65
| 164
| 43.2
| 0.777245
| 0
| 0
| 0.724138
| 0
| 0
| 0.157351
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12069
| false
| 0.241379
| 0.034483
| 0
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
35fd46c91996d83204e7e92ff09d957785e6d654
| 104
|
py
|
Python
|
src/ctc/rpc/rpc_batch/__init__.py
|
fei-protocol/checkthechain
|
ec838f3d0d44af228f45394d9ba8d8eb7f677520
|
[
"MIT"
] | 94
|
2022-02-15T19:34:49.000Z
|
2022-03-26T19:26:22.000Z
|
src/ctc/rpc/rpc_batch/__init__.py
|
fei-protocol/checkthechain
|
ec838f3d0d44af228f45394d9ba8d8eb7f677520
|
[
"MIT"
] | 7
|
2022-03-03T02:58:47.000Z
|
2022-03-11T18:41:05.000Z
|
src/ctc/rpc/rpc_batch/__init__.py
|
fei-protocol/checkthechain
|
ec838f3d0d44af228f45394d9ba8d8eb7f677520
|
[
"MIT"
] | 7
|
2022-02-15T17:53:07.000Z
|
2022-03-17T19:14:17.000Z
|
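# Convenience re-exports: importing ctc.rpc.rpc_batch pulls in the public
# names of the constructors, executors and utils submodules.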
from .rpc_batch_constructors import *
from .rpc_batch_executors import *
from .rpc_batch_utils import *
| 26
| 37
| 0.826923
| 15
| 104
| 5.333333
| 0.466667
| 0.2625
| 0.45
| 0.45
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 104
| 3
| 38
| 34.666667
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
c42740afee233cbd95fb93995925e67c2f0bcb8e
| 96
|
py
|
Python
|
trainer/datasets/__init__.py
|
jason-zl190/sisr
|
2415d28333c94602c52be9c314a8044165d992cf
|
[
"Apache-2.0"
] | 2
|
2019-12-15T17:12:46.000Z
|
2019-12-15T21:09:31.000Z
|
trainer/datasets/__init__.py
|
jason-zl190/sisr
|
2415d28333c94602c52be9c314a8044165d992cf
|
[
"Apache-2.0"
] | null | null | null |
trainer/datasets/__init__.py
|
jason-zl190/sisr
|
2415d28333c94602c52be9c314a8044165d992cf
|
[
"Apache-2.0"
] | 1
|
2020-12-15T15:30:12.000Z
|
2020-12-15T15:30:12.000Z
|
from trainer.datasets.oxford_iiit_pet import oxford_iiit_pet_dataset, oxford_iiit_pet_dataset_D
| 48
| 95
| 0.916667
| 16
| 96
| 4.9375
| 0.5625
| 0.379747
| 0.493671
| 0.506329
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052083
| 96
| 1
| 96
| 96
| 0.868132
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
6711415927ceb821eac098cbd7ee31df6371f65b
| 37
|
py
|
Python
|
splitcli/splitio_selectors/organization_selectors.py
|
stephencsnow/splitcli
|
f0b9a451215bb052c91e4802bd6d0dcca0407dab
|
[
"Apache-2.0"
] | 36
|
2021-03-14T19:46:24.000Z
|
2021-05-20T22:57:00.000Z
|
splitcli/splitio_selectors/organization_selectors.py
|
stephencsnow/splitcli
|
f0b9a451215bb052c91e4802bd6d0dcca0407dab
|
[
"Apache-2.0"
] | 2
|
2021-04-02T22:04:23.000Z
|
2021-04-06T20:45:39.000Z
|
splitcli/splitio_selectors/organization_selectors.py
|
stephencsnow/splitcli
|
f0b9a451215bb052c91e4802bd6d0dcca0407dab
|
[
"Apache-2.0"
] | 2
|
2021-03-27T16:16:50.000Z
|
2021-06-18T21:00:18.000Z
|
def manage_organization():
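    # Stub: returns immediately; organization management is not implemented yet.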
return
| 18.5
| 26
| 0.756757
| 4
| 37
| 6.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162162
| 37
| 2
| 27
| 18.5
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
6716d5712fd818d568fade7b11cfc513ba1b666c
| 20,011
|
py
|
Python
|
keepit/db_analysis.py
|
franklinmatheus/sistema-gerenciamento-economico
|
6e2565eedd644469d22da3ea58141bcedb46da97
|
[
"Apache-2.0"
] | null | null | null |
keepit/db_analysis.py
|
franklinmatheus/sistema-gerenciamento-economico
|
6e2565eedd644469d22da3ea58141bcedb46da97
|
[
"Apache-2.0"
] | 1
|
2019-06-10T23:49:34.000Z
|
2020-09-23T01:10:06.000Z
|
keepit/db_analysis.py
|
franklinmatheus/sistema-gerenciamento-economico
|
6e2565eedd644469d22da3ea58141bcedb46da97
|
[
"Apache-2.0"
] | 1
|
2020-07-20T07:20:15.000Z
|
2020-07-20T07:20:15.000Z
|
from keepit.db import get_db
def get_balance(id_user: int):
db = get_db()
cursor = db.cursor(dictionary=True)
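    # saldo = (common + uncommon revenue payments) - (common + uncommon expense
    # payments); each of the four subqueries below sums pagamento_recurso.valor
    # for one of those categories.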
select_query = ('''SELECT (receitas_comuns.total + receitas_incomuns.total) - (despesas_comuns.total + despesas_incomuns.total) saldo FROM
(SELECT COALESCE(SUM(keepit.pagamento_recurso.valor),0) total FROM
(((keepit.recurso JOIN keepit.despesa ON keepit.recurso.id_recurso = keepit.despesa.id_recurso)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
JOIN keepit.despesa_comum ON keepit.despesa.id_despesa = keepit.despesa_comum.id_despesa)
WHERE keepit.pagamento_recurso.data_pagamento IS NOT NULL
AND keepit.recurso.id_usuario=%s) AS despesas_comuns,
(SELECT COALESCE(SUM(keepit.pagamento_recurso.valor),0) total FROM
(((keepit.recurso JOIN keepit.despesa ON keepit.recurso.id_recurso=keepit.despesa.id_recurso)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
JOIN keepit.despesa_incomum ON keepit.despesa.id_despesa = keepit.despesa_incomum.id_despesa)
WHERE keepit.pagamento_recurso.data_pagamento IS NOT NULL
AND keepit.recurso.data_cancelamento IS NOT NULL
AND keepit.recurso.id_usuario=%s) AS despesas_incomuns,
(SELECT COALESCE(SUM(keepit.pagamento_recurso.valor),0) total FROM
(((keepit.recurso JOIN keepit.receita ON keepit.recurso.id_recurso=keepit.receita.id_recurso)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
JOIN keepit.receita_comum ON keepit.receita.id_receita = keepit.receita_comum.id_receita)
WHERE keepit.pagamento_recurso.data_pagamento IS NOT NULL
AND keepit.recurso.id_usuario=%s) AS receitas_comuns,
(SELECT COALESCE(SUM(keepit.pagamento_recurso.valor),0) total FROM
(((keepit.recurso JOIN keepit.receita ON keepit.recurso.id_recurso=keepit.receita.id_recurso)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
JOIN keepit.receita_incomum ON keepit.receita.id_receita = keepit.receita_incomum.id_receita)
WHERE keepit.pagamento_recurso.data_pagamento IS NOT NULL
AND keepit.recurso.data_cancelamento IS NOT NULL
AND keepit.recurso.id_usuario=%s) AS receitas_incomuns
''')
select_data = (id_user,id_user,id_user,id_user)
cursor.execute(select_query,select_data)
result = cursor.fetchone()
cursor.close()
db.close()
return result['saldo']
def get_expenses_info(id_user: int, month: int, year: int):
db = get_db()
cursor = db.cursor(dictionary=True)
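    # 'comum' gathers count/total of common expenses paid in the given month and
    # year, plus how many are out of date (status=0); 'incomum' gathers count/total
    # of paid, non-cancelled uncommon expenses for the same period.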
info = {'comum':{'desatualizadas':0,'quantidade':0,'total':0}
,'incomum':{'quantidade':0,'total':0}}
select_query = ('''SELECT COUNT(DISTINCT(keepit.recurso.id_recurso)) quantidade,
SUM(keepit.pagamento_recurso.valor) total FROM
(((keepit.recurso JOIN keepit.despesa ON keepit.recurso.id_recurso=keepit.despesa.id_recurso)
JOIN keepit.despesa_comum ON keepit.despesa.id_despesa=keepit.despesa_comum.id_despesa)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
WHERE keepit.recurso.id_usuario=%s and keepit.pagamento_recurso.data_pagamento IS NOT NULL
AND MONTH(keepit.pagamento_recurso.data_pagamento) = %s AND YEAR(keepit.pagamento_recurso.data_pagamento) = %s
GROUP BY keepit.recurso.id_usuario
''')
select_data = (id_user,month,year)
cursor.execute(select_query,select_data)
quantity_info = cursor.fetchone()
if quantity_info is not None:
info['comum']['quantidade'] = quantity_info['quantidade']
info['comum']['total'] = quantity_info['total']
select_query = ('''SELECT COUNT(*) desatualizadas FROM
((keepit.recurso JOIN keepit.despesa ON keepit.recurso.id_recurso=keepit.despesa.id_recurso)
JOIN keepit.despesa_comum ON keepit.despesa.id_despesa=keepit.despesa_comum.id_despesa)
WHERE keepit.recurso.id_usuario=%s AND keepit.despesa_comum.status=0
''')
select_data = (id_user,)
cursor.execute(select_query,select_data)
late_info = cursor.fetchone()
if late_info is not None:
info['comum']['desatualizadas'] = late_info['desatualizadas']
select_query = ('''SELECT COUNT(DISTINCT(keepit.recurso.id_recurso)) quantidade,
SUM(keepit.pagamento_recurso.valor) total FROM
(((keepit.recurso JOIN keepit.despesa ON keepit.recurso.id_recurso=keepit.despesa.id_recurso)
JOIN keepit.despesa_incomum ON keepit.despesa.id_despesa=keepit.despesa_incomum.id_despesa)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
WHERE keepit.recurso.id_usuario=%s and keepit.pagamento_recurso.data_pagamento IS NOT NULL
AND keepit.recurso.data_cancelamento IS NULL
AND MONTH(keepit.pagamento_recurso.data_pagamento) = %s AND YEAR(keepit.pagamento_recurso.data_pagamento) = %s
GROUP BY keepit.recurso.id_usuario
''')
select_data = (id_user,month,year)
cursor.execute(select_query,select_data)
quantity_info = cursor.fetchone()
if quantity_info is not None:
info['incomum']['quantidade'] = quantity_info['quantidade']
info['incomum']['total'] = quantity_info['total']
cursor.close()
db.close()
return info
def get_revenues_info(id_user: int, month: int, year: int):
db = get_db()
cursor = db.cursor(dictionary=True)
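    # Mirrors get_expenses_info, but over the revenue (receita) tables.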
info = {'comum':{'desatualizadas':0,'quantidade':0,'total':0}
,'incomum':{'quantidade':0,'total':0}}
select_query = ('''SELECT COUNT(DISTINCT(keepit.recurso.id_recurso)) quantidade,
SUM(keepit.pagamento_recurso.valor) total FROM
(((keepit.recurso JOIN keepit.receita ON keepit.recurso.id_recurso=keepit.receita.id_recurso)
JOIN keepit.receita_comum ON keepit.receita.id_receita=keepit.receita_comum.id_receita)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
WHERE keepit.recurso.id_usuario=%s and keepit.pagamento_recurso.data_pagamento IS NOT NULL
AND MONTH(keepit.pagamento_recurso.data_pagamento) = %s AND YEAR(keepit.pagamento_recurso.data_pagamento) = %s
GROUP BY keepit.recurso.id_usuario
''')
select_data = (id_user,month,year)
cursor.execute(select_query,select_data)
quantity_info = cursor.fetchone()
if quantity_info is not None:
info['comum']['quantidade'] = quantity_info['quantidade']
info['comum']['total'] = quantity_info['total']
select_query = ('''SELECT COUNT(*) desatualizadas FROM
((keepit.recurso JOIN keepit.receita ON keepit.recurso.id_recurso=keepit.receita.id_recurso)
JOIN keepit.receita_comum ON keepit.receita.id_receita=keepit.receita_comum.id_receita)
WHERE keepit.recurso.id_usuario=%s AND keepit.receita_comum.status=0
''')
select_data = (id_user,)
cursor.execute(select_query,select_data)
late_info = cursor.fetchone()
if late_info is not None:
info['comum']['desatualizadas'] = late_info['desatualizadas']
select_query = ('''SELECT COUNT(DISTINCT(keepit.recurso.id_recurso)) quantidade,
SUM(keepit.pagamento_recurso.valor) total FROM
(((keepit.recurso JOIN keepit.receita ON keepit.recurso.id_recurso=keepit.receita.id_recurso)
JOIN keepit.receita_incomum ON keepit.receita.id_receita=keepit.receita_incomum.id_receita)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
WHERE keepit.recurso.id_usuario=%s and keepit.pagamento_recurso.data_pagamento IS NOT NULL
AND MONTH(keepit.pagamento_recurso.data_pagamento) = %s AND YEAR(keepit.pagamento_recurso.data_pagamento) = %s
AND keepit.recurso.data_cancelamento IS NULL
GROUP BY keepit.recurso.id_usuario
''')
select_data = (id_user,month,year)
cursor.execute(select_query,select_data)
quantity_info = cursor.fetchone()
if quantity_info is not None:
info['incomum']['quantidade'] = quantity_info['quantidade']
info['incomum']['total'] = quantity_info['total']
cursor.close()
db.close()
return info
def get_total_expenses_by_day(id_user: int):
db = get_db()
cursor = db.cursor(dictionary=True)
select_query = ('''SELECT despesas.data_pagamento, COUNT(*) quantidade, SUM(despesas.valor) total FROM (
(SELECT keepit.pagamento_recurso.data_pagamento, keepit.pagamento_recurso.valor FROM
(((keepit.recurso JOIN keepit.despesa ON keepit.recurso.id_recurso = keepit.despesa.id_recurso)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
JOIN keepit.despesa_comum ON keepit.despesa.id_despesa = keepit.despesa_comum.id_despesa)
WHERE keepit.recurso.id_usuario=%s
AND keepit.pagamento_recurso.data_pagamento IS NOT NULL)
UNION
(SELECT keepit.pagamento_recurso.data_pagamento, keepit.pagamento_recurso.valor FROM
(((keepit.recurso JOIN keepit.despesa ON keepit.recurso.id_recurso = keepit.despesa.id_recurso)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
JOIN keepit.despesa_incomum ON keepit.despesa.id_despesa = keepit.despesa_incomum.id_despesa)
WHERE keepit.recurso.id_usuario=%s
AND keepit.pagamento_recurso.data_pagamento IS NOT NULL
AND keepit.recurso.data_cancelamento IS NULL)) despesas
GROUP BY despesas.data_pagamento
''')
select_data = (id_user,id_user)
cursor.execute(select_query,select_data)
results = cursor.fetchall()
cursor.close()
db.close()
return results
def get_total_revenues_by_day(id_user: int):
db = get_db()
cursor = db.cursor(dictionary=True)
select_query = ('''SELECT receitas.data_pagamento, COUNT(*) quantidade, SUM(receitas.valor) total FROM (
(SELECT keepit.pagamento_recurso.data_pagamento, keepit.pagamento_recurso.valor FROM
(((keepit.recurso JOIN keepit.receita ON keepit.recurso.id_recurso = keepit.receita.id_recurso)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
JOIN keepit.receita_comum ON keepit.receita.id_receita = keepit.receita_comum.id_receita)
WHERE keepit.recurso.id_usuario=%s
AND keepit.pagamento_recurso.data_pagamento IS NOT NULL)
UNION
(SELECT keepit.pagamento_recurso.data_pagamento, keepit.pagamento_recurso.valor FROM
(((keepit.recurso JOIN keepit.receita ON keepit.recurso.id_recurso = keepit.receita.id_recurso)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
JOIN keepit.receita_incomum ON keepit.receita.id_receita = keepit.receita_incomum.id_receita)
WHERE keepit.recurso.id_usuario=%s
AND keepit.pagamento_recurso.data_pagamento IS NOT NULL
AND keepit.recurso.data_cancelamento IS NULL)) receitas
GROUP BY receitas.data_pagamento
''')
select_data = (id_user,id_user)
cursor.execute(select_query,select_data)
results = cursor.fetchall()
cursor.close()
db.close()
return results
def get_total_expenses_by_month(id_user: int, year: int):
db = get_db()
cursor = db.cursor(dictionary=True)
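    # MySQL has no FULL OUTER JOIN, so the query emulates one: a RIGHT JOIN of
    # the common/uncommon monthly sums UNIONed with the matching LEFT JOIN.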
select_query = ('''SELECT * FROM
(SELECT MONTH(keepit.pagamento_recurso.data_pagamento) mes_comum, SUM(keepit.pagamento_recurso.valor) total_comum FROM
(((keepit.recurso JOIN keepit.despesa ON keepit.recurso.id_recurso = keepit.despesa.id_recurso)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
JOIN keepit.despesa_comum ON keepit.despesa.id_despesa = keepit.despesa_comum.id_despesa)
WHERE keepit.recurso.id_usuario=%s AND keepit.pagamento_recurso.data_pagamento IS NOT NULL
AND YEAR(keepit.pagamento_recurso.data_pagamento) = %s
GROUP BY MONTH(keepit.pagamento_recurso.data_pagamento)) despesas_comuns
RIGHT JOIN
(SELECT MONTH(keepit.pagamento_recurso.data_pagamento) mes_incomum, SUM(keepit.pagamento_recurso.valor) total_incomum FROM
(((keepit.recurso JOIN keepit.despesa ON keepit.recurso.id_recurso=keepit.despesa.id_recurso)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
JOIN keepit.despesa_incomum ON keepit.despesa.id_despesa = keepit.despesa_incomum.id_despesa)
WHERE keepit.pagamento_recurso.data_pagamento IS NOT NULL
AND keepit.recurso.data_cancelamento IS NULL
AND keepit.recurso.id_usuario=%s
AND YEAR(keepit.pagamento_recurso.data_pagamento) = %s
GROUP BY MONTH(keepit.pagamento_recurso.data_pagamento)) despesas_incomuns
ON despesas_comuns.mes_comum = despesas_incomuns.mes_incomum
UNION
SELECT * FROM
(SELECT MONTH(keepit.pagamento_recurso.data_pagamento) mes_comum, SUM(keepit.pagamento_recurso.valor) total_comum FROM
(((keepit.recurso JOIN keepit.despesa ON keepit.recurso.id_recurso = keepit.despesa.id_recurso)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
JOIN keepit.despesa_comum ON keepit.despesa.id_despesa = keepit.despesa_comum.id_despesa)
WHERE keepit.recurso.id_usuario=%s AND keepit.pagamento_recurso.data_pagamento IS NOT NULL
AND YEAR(keepit.pagamento_recurso.data_pagamento) = %s
GROUP BY MONTH(keepit.pagamento_recurso.data_pagamento)) despesas_comuns
LEFT JOIN
(SELECT MONTH(keepit.pagamento_recurso.data_pagamento) mes_incomum, SUM(keepit.pagamento_recurso.valor) total_incomum FROM
(((keepit.recurso JOIN keepit.despesa ON keepit.recurso.id_recurso=keepit.despesa.id_recurso)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
JOIN keepit.despesa_incomum ON keepit.despesa.id_despesa = keepit.despesa_incomum.id_despesa)
WHERE keepit.pagamento_recurso.data_pagamento IS NOT NULL
AND keepit.recurso.data_cancelamento IS NULL
AND keepit.recurso.id_usuario=%s
AND YEAR(keepit.pagamento_recurso.data_pagamento) = %s
GROUP BY MONTH(keepit.pagamento_recurso.data_pagamento)) despesas_incomuns
ON despesas_comuns.mes_comum = despesas_incomuns.mes_incomum
''')
select_data = (id_user,year,id_user,year,id_user,year,id_user,year)
cursor.execute(select_query,select_data)
results = cursor.fetchall()
cursor.close()
db.close()
for result in results:
if result['total_comum'] is not None and result['total_incomum'] is not None:
result['total'] = result['total_comum'] + result['total_incomum']
elif result['total_comum'] is not None:
result['total'] = result['total_comum']
elif result['total_incomum'] is not None:
result['total'] = result['total_incomum']
if result['mes_comum'] is not None:
result['mes'] = result['mes_comum']
elif result['mes_incomum'] is not None:
result['mes'] = result['mes_incomum']
del result['total_incomum']
del result['mes_incomum']
del result['total_comum']
del result['mes_comum']
return results
def get_total_revenues_by_month(id_user: int, year: int):
db = get_db()
cursor = db.cursor(dictionary=True)
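    # Same FULL OUTER JOIN emulation as get_total_expenses_by_month, over the
    # revenue (receita) tables.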
select_query = ('''SELECT * FROM
(SELECT MONTH(keepit.pagamento_recurso.data_pagamento) mes_comum, SUM(keepit.pagamento_recurso.valor) total_comum FROM
(((keepit.recurso JOIN keepit.receita ON keepit.recurso.id_recurso = keepit.receita.id_recurso)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
JOIN keepit.receita_comum ON keepit.receita.id_receita = keepit.receita_comum.id_receita)
WHERE keepit.recurso.id_usuario=%s AND keepit.pagamento_recurso.data_pagamento IS NOT NULL
AND YEAR(keepit.pagamento_recurso.data_pagamento) = %s
GROUP BY MONTH(keepit.pagamento_recurso.data_pagamento)) receitas_comuns
RIGHT JOIN
(SELECT MONTH(keepit.pagamento_recurso.data_pagamento) mes_incomum, SUM(keepit.pagamento_recurso.valor) total_incomum FROM
(((keepit.recurso JOIN keepit.receita ON keepit.recurso.id_recurso=keepit.receita.id_recurso)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
JOIN keepit.receita_incomum ON keepit.receita.id_receita = keepit.receita_incomum.id_receita)
WHERE keepit.pagamento_recurso.data_pagamento IS NOT NULL
AND keepit.recurso.data_cancelamento IS NULL
AND keepit.recurso.id_usuario=%s
AND YEAR(keepit.pagamento_recurso.data_pagamento) = %s
GROUP BY MONTH(keepit.pagamento_recurso.data_pagamento)) receitas_incomuns
ON receitas_comuns.mes_comum = receitas_incomuns.mes_incomum
UNION
SELECT * FROM
(SELECT MONTH(keepit.pagamento_recurso.data_pagamento) mes_comum, SUM(keepit.pagamento_recurso.valor) total_comum FROM
(((keepit.recurso JOIN keepit.receita ON keepit.recurso.id_recurso = keepit.receita.id_recurso)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
JOIN keepit.receita_comum ON keepit.receita.id_receita = keepit.receita_comum.id_receita)
WHERE keepit.recurso.id_usuario=%s AND keepit.pagamento_recurso.data_pagamento IS NOT NULL
AND YEAR(keepit.pagamento_recurso.data_pagamento) = %s
GROUP BY MONTH(keepit.pagamento_recurso.data_pagamento)) receitas_comuns
LEFT JOIN
(SELECT MONTH(keepit.pagamento_recurso.data_pagamento) mes_incomum, SUM(keepit.pagamento_recurso.valor) total_incomum FROM
(((keepit.recurso JOIN keepit.receita ON keepit.recurso.id_recurso=keepit.receita.id_recurso)
JOIN keepit.pagamento_recurso ON keepit.pagamento_recurso.id_recurso = keepit.recurso.id_recurso)
JOIN keepit.receita_incomum ON keepit.receita.id_receita = keepit.receita_incomum.id_receita)
WHERE keepit.pagamento_recurso.data_pagamento IS NOT NULL
AND keepit.recurso.data_cancelamento IS NULL
AND keepit.recurso.id_usuario=%s
AND YEAR(keepit.pagamento_recurso.data_pagamento) = %s
GROUP BY MONTH(keepit.pagamento_recurso.data_pagamento)) receitas_incomuns
ON receitas_comuns.mes_comum = receitas_incomuns.mes_incomum
''')
select_data = (id_user,year,id_user,year,id_user,year,id_user,year)
cursor.execute(select_query,select_data)
results = cursor.fetchall()
cursor.close()
db.close()
for result in results:
if result['total_comum'] is not None and result['total_incomum'] is not None:
result['total'] = result['total_comum'] + result['total_incomum']
elif result['total_comum'] is not None:
result['total'] = result['total_comum']
elif result['total_incomum'] is not None:
result['total'] = result['total_incomum']
if result['mes_comum'] is not None:
result['mes'] = result['mes_comum']
elif result['mes_incomum'] is not None:
result['mes'] = result['mes_incomum']
del result['total_incomum']
del result['mes_incomum']
del result['total_comum']
del result['mes_comum']
return results
| 56.053221
| 143
| 0.724052
| 2,613
| 20,011
| 5.317643
| 0.028703
| 0.125225
| 0.183663
| 0.104786
| 0.980569
| 0.972868
| 0.971285
| 0.966607
| 0.966607
| 0.966607
| 0
| 0.000978
| 0.182799
| 20,011
| 357
| 144
| 56.053221
| 0.848713
| 0
| 0
| 0.917722
| 0
| 0.110759
| 0.768489
| 0.456676
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022152
| false
| 0
| 0.003165
| 0
| 0.047468
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
67da635a7a56f93caaa89e86fe008688ecde3db6
| 23,941
|
py
|
Python
|
mpf/tests/test_ComboSwitches.py
|
cloudjor/mpf
|
1cf6bf18b0d81120383b0b128b0ebbfa1c62717c
|
[
"MIT"
] | null | null | null |
mpf/tests/test_ComboSwitches.py
|
cloudjor/mpf
|
1cf6bf18b0d81120383b0b128b0ebbfa1c62717c
|
[
"MIT"
] | null | null | null |
mpf/tests/test_ComboSwitches.py
|
cloudjor/mpf
|
1cf6bf18b0d81120383b0b128b0ebbfa1c62717c
|
[
"MIT"
] | null | null | null |
from mpf.tests.MpfTestCase import MpfTestCase
from unittest.mock import MagicMock
class TestComboSwitches(MpfTestCase):
def getConfigFile(self):
return 'combo_switches.yaml'
def getMachinePath(self):
return 'tests/machine_files/combo_switches/'
def test_tag_combo(self):
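        # switch5 and switch7 share the combo's tag; exercise the
        # both/one/inactive transitions in both activation orders.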
self.mock_event('tag_combo_both')
self.mock_event('tag_combo_inactive')
self.mock_event('tag_combo_one')
self.hit_switch_and_run('switch5', 1)
self.assertEventNotCalled('tag_combo_both')
self.assertEventNotCalled('tag_combo_inactive')
self.assertEventNotCalled('tag_combo_one')
self.assertEqual(self.machine.combo_switches.tag_combo.state, 'inactive')
self.hit_switch_and_run('switch7', 1)
self.assertEventCalled('tag_combo_both')
self.assertEventNotCalled('tag_combo_inactive')
self.assertEventNotCalled('tag_combo_one')
self.assertEqual(self.machine.combo_switches.tag_combo.state, 'both')
self.release_switch_and_run('switch7', 1)
self.assertEventNotCalled('tag_combo_inactive')
self.assertEventCalled('tag_combo_one')
self.assertEqual(self.machine.combo_switches.tag_combo.state, 'one')
self.mock_event('tag_combo_both')
self.mock_event('tag_combo_inactive')
self.mock_event('tag_combo_one')
self.hit_switch_and_run('switch7', 1)
self.assertEventCalled('tag_combo_both')
self.assertEventNotCalled('tag_combo_inactive')
self.assertEventNotCalled('tag_combo_one')
self.release_switch_and_run('switch5', 1)
self.assertEventNotCalled('tag_combo_inactive')
self.assertEventCalled('tag_combo_one')
self.release_switch_and_run('switch7', 1)
self.assertEventCalled('tag_combo_inactive')
# now make sure it all works with the switches in the other order
self.mock_event('tag_combo_both')
self.mock_event('tag_combo_inactive')
self.mock_event('tag_combo_one')
self.hit_switch_and_run('switch7', 1)
self.assertEventNotCalled('tag_combo_both')
self.assertEventNotCalled('tag_combo_inactive')
self.assertEventNotCalled('tag_combo_one')
self.hit_switch_and_run('switch5', 1)
self.assertEventCalled('tag_combo_both')
self.assertEventNotCalled('tag_combo_inactive')
self.assertEventNotCalled('tag_combo_one')
self.release_switch_and_run('switch5', 1)
self.assertEventNotCalled('tag_combo_inactive')
self.assertEventCalled('tag_combo_one')
self.mock_event('tag_combo_both')
self.mock_event('tag_combo_inactive')
self.mock_event('tag_combo_one')
self.hit_switch_and_run('switch5', 1)
self.assertEventCalled('tag_combo_both')
self.assertEventNotCalled('tag_combo_inactive')
self.assertEventNotCalled('tag_combo_one')
self.release_switch_and_run('switch7', 1)
self.assertEventNotCalled('tag_combo_inactive')
self.assertEventCalled('tag_combo_one')
self.release_switch_and_run('switch5', 1)
self.assertEventCalled('tag_combo_inactive')
def test_switch_combo(self):
self.mock_event('switch_combo_both')
self.mock_event('switch_combo_inactive')
self.mock_event('switch_combo_one')
self.hit_switch_and_run('switch1', 1)
self.assertEventNotCalled('switch_combo_both')
self.assertEventNotCalled('switch_combo_inactive')
self.assertEventNotCalled('switch_combo_one')
self.hit_switch_and_run('switch2', 1)
self.assertEventCalled('switch_combo_both')
self.assertEventNotCalled('switch_combo_inactive')
self.assertEventNotCalled('switch_combo_one')
self.release_switch_and_run('switch2', 1)
self.assertEventNotCalled('switch_combo_inactive')
self.assertEventCalled('switch_combo_one')
self.mock_event('switch_combo_both')
self.mock_event('switch_combo_inactive')
self.mock_event('switch_combo_one')
self.hit_switch_and_run('switch2', 1)
self.assertEventCalled('switch_combo_both')
self.assertEventNotCalled('switch_combo_inactive')
self.assertEventNotCalled('switch_combo_one')
self.release_switch_and_run('switch1', 1)
self.assertEventNotCalled('switch_combo_inactive')
self.assertEventCalled('switch_combo_one')
self.release_switch_and_run('switch2', 1)
self.assertEventCalled('switch_combo_inactive')
# test long offset time
self.mock_event('switch_combo_both')
self.mock_event('switch_combo_inactive')
self.mock_event('switch_combo_one')
self.hit_switch_and_run('switch1', 100)
self.assertEventNotCalled('switch_combo_both')
self.hit_switch_and_run('switch2', .1)
self.assertEventCalled('switch_combo_both')
def test_multiple_switch_combo(self):
# first test the basics with multiple switches listed
self.mock_event('multiple_switch_combo_both')
self.mock_event('multiple_switch_combo_inactive')
self.mock_event('multiple_switch_combo_one')
self.hit_switch_and_run('switch1', 1)
self.assertEventNotCalled('multiple_switch_combo_both')
self.assertEventNotCalled('multiple_switch_combo_inactive')
self.assertEventNotCalled('multiple_switch_combo_one')
self.hit_switch_and_run('switch3', 1)
self.assertEventCalled('multiple_switch_combo_both')
self.assertEventNotCalled('multiple_switch_combo_inactive')
self.assertEventNotCalled('multiple_switch_combo_one')
self.release_switch_and_run('switch3', 1)
self.assertEventNotCalled('multiple_switch_combo_inactive')
self.assertEventCalled('multiple_switch_combo_one')
self.mock_event('multiple_switch_combo_both')
self.mock_event('multiple_switch_combo_inactive')
self.mock_event('multiple_switch_combo_one')
self.hit_switch_and_run('switch3', 1)
self.assertEventCalled('multiple_switch_combo_both')
self.assertEventNotCalled('multiple_switch_combo_inactive')
self.assertEventNotCalled('multiple_switch_combo_one')
self.release_switch_and_run('switch1', 1)
self.assertEventNotCalled('multiple_switch_combo_inactive')
self.assertEventCalled('multiple_switch_combo_one')
self.release_switch_and_run('switch3', 1)
self.assertEventCalled('multiple_switch_combo_inactive')
# now start playing with combinations of switches from the same group
self.mock_event('multiple_switch_combo_both')
self.mock_event('multiple_switch_combo_inactive')
self.mock_event('multiple_switch_combo_one')
# hit switch 1, nothing happens
self.hit_switch_and_run('switch1', 1)
self.assertEventNotCalled('multiple_switch_combo_both')
self.assertEventNotCalled('multiple_switch_combo_inactive')
self.assertEventNotCalled('multiple_switch_combo_one')
# hit switch 2, which is in group 1, so still nothing happens
self.hit_switch_and_run('switch2', 1)
self.assertEventNotCalled('multiple_switch_combo_both')
self.assertEventNotCalled('multiple_switch_combo_inactive')
self.assertEventNotCalled('multiple_switch_combo_one')
# hit switch 3, which is in group 2, so we're active
self.hit_switch_and_run('switch3', 1)
self.assertEventCalled('multiple_switch_combo_both')
self.assertEventNotCalled('multiple_switch_combo_inactive')
self.assertEventNotCalled('multiple_switch_combo_one')
self.mock_event('multiple_switch_combo_both')
self.mock_event('multiple_switch_combo_inactive')
self.mock_event('multiple_switch_combo_one')
# hit switch 4, in group 2, so nothing happens
self.hit_switch_and_run('switch4', 1)
self.assertEventNotCalled('multiple_switch_combo_both')
self.assertEventNotCalled('multiple_switch_combo_inactive')
self.assertEventNotCalled('multiple_switch_combo_one')
# release switch 3, but switch 4 is still active, so nothing happens
self.release_switch_and_run('switch3', 1)
self.assertEventNotCalled('multiple_switch_combo_both')
self.assertEventNotCalled('multiple_switch_combo_inactive')
self.assertEventNotCalled('multiple_switch_combo_one')
# release switch 2, but switch 1 is still active, so nothing happens
self.release_switch_and_run('switch2', 1)
self.assertEventNotCalled('multiple_switch_combo_both')
self.assertEventNotCalled('multiple_switch_combo_inactive')
self.assertEventNotCalled('multiple_switch_combo_one')
# release switch 1, the last from group 1, so now we have the one event
self.release_switch_and_run('switch1', 1)
self.assertEventNotCalled('multiple_switch_combo_both')
self.assertEventNotCalled('multiple_switch_combo_inactive')
self.assertEventCalled('multiple_switch_combo_one')
# hit switch 2, so now we go back to combo active
self.hit_switch_and_run('switch2', 1)
self.assertEventCalled('multiple_switch_combo_both')
self.assertEventNotCalled('multiple_switch_combo_inactive')
self.assertEventCalled('multiple_switch_combo_one')
self.mock_event('multiple_switch_combo_both')
self.mock_event('multiple_switch_combo_inactive')
self.mock_event('multiple_switch_combo_one')
        # release switch 4, so we go back to only one group being active
self.release_switch_and_run('switch4', 1)
self.assertEventNotCalled('multiple_switch_combo_both')
self.assertEventNotCalled('multiple_switch_combo_inactive')
self.assertEventCalled('multiple_switch_combo_one')
# release switch 2, so back to none are active
self.release_switch_and_run('switch2', 1)
self.assertEventNotCalled('multiple_switch_combo_both')
self.assertEventCalled('multiple_switch_combo_inactive')
self.assertEventCalled('multiple_switch_combo_one')
def test_custom_offset(self):
# we have a 1s offset
self.mock_event('custom_offset_both')
self.mock_event('custom_offset_inactive')
self.mock_event('custom_offset_one')
self.mock_event('custom_offset_switches_1')
self.mock_event('custom_offset_switches_2')
self.hit_switch_and_run('switch1', .1)
self.assertEventNotCalled('custom_offset_switches_1')
self.assertEventNotCalled('custom_offset_switches_2')
self.advance_time_and_run(1.9)
self.assertEventNotCalled('custom_offset_both')
self.assertEventNotCalled('custom_offset_inactive')
self.assertEventNotCalled('custom_offset_one')
self.assertEventCalled('custom_offset_switches_1')
self.assertEventNotCalled('custom_offset_switches_2')
self.mock_event('custom_offset_switches_1')
        # switch2 arrives more than 1s after switch1 (outside the offset window), so events should not be posted
self.hit_switch_and_run('switch2', 1)
self.assertEventNotCalled('custom_offset_both')
self.assertEventNotCalled('custom_offset_inactive')
self.assertEventNotCalled('custom_offset_one')
self.assertEventNotCalled('custom_offset_switches_1')
self.assertEventNotCalled('custom_offset_switches_2')
self.release_switch_and_run('switch1', .1)
self.release_switch_and_run('switch2', .1)
# now hit both of the switches in < 1s
self.hit_switch_and_run('switch1', .1)
self.hit_switch_and_run('switch2', .1)
self.assertEventCalled('custom_offset_both')
self.advance_time_and_run(10)
self.assertEventNotCalled('custom_offset_inactive')
self.assertEventNotCalled('custom_offset_one')
self.assertEventNotCalled('custom_offset_switches_1')
self.assertEventNotCalled('custom_offset_switches_2')
def test_custom_hold(self):
# we have a 1s hold time
self.mock_event('custom_hold_both')
self.mock_event('custom_hold_inactive')
self.mock_event('custom_hold_one')
self.hit_switch_and_run('switch1', 5)
self.hit_switch_and_run('switch2', .5)
self.assertEventNotCalled('custom_hold_both')
self.assertEventNotCalled('custom_hold_inactive')
self.assertEventNotCalled('custom_hold_one')
# advance more than 1s from the first switch, nothing should happen
self.advance_time_and_run(.1)
self.assertEventNotCalled('custom_hold_both')
self.assertEventNotCalled('custom_hold_inactive')
self.assertEventNotCalled('custom_hold_one')
# advance more than 1s from the second switch
self.advance_time_and_run(.6)
self.assertEventCalled('custom_hold_both')
self.assertEventNotCalled('custom_hold_inactive')
self.assertEventNotCalled('custom_hold_one')
# release one of the switches, one should be posted
self.release_switch_and_run('switch2', .1)
self.assertEventNotCalled('custom_hold_inactive')
self.assertEventCalled('custom_hold_one')
self.mock_event('custom_hold_both')
self.mock_event('custom_hold_inactive')
self.mock_event('custom_hold_one')
# hit the second switch again, event should not be posted
self.hit_switch_and_run('switch2', .5)
self.assertEventNotCalled('custom_hold_both')
self.assertEventNotCalled('custom_hold_inactive')
self.assertEventNotCalled('custom_hold_one')
# advance more than 1s to see the event
self.advance_time_and_run(.6)
self.assertEventCalled('custom_hold_both')
self.assertEventNotCalled('custom_hold_inactive')
self.assertEventNotCalled('custom_hold_one')
# release both
self.release_switch_and_run('switch1', .1)
self.release_switch_and_run('switch2', .1)
self.assertEventCalled('custom_hold_inactive')
def test_custom_release(self):
# release time of 1s
self.mock_event('custom_release_both')
self.mock_event('custom_release_inactive')
self.mock_event('custom_release_one')
# both switches should post both
self.hit_switch_and_run('switch1', 1)
self.hit_switch_and_run('switch2', .1)
self.assertEventCalled('custom_release_both')
self.assertEventNotCalled('custom_release_inactive')
self.assertEventNotCalled('custom_release_one')
# release 1, should not post one because it's less than 1s
self.release_switch_and_run('switch2', .1)
self.assertEventNotCalled('custom_release_inactive')
self.assertEventNotCalled('custom_release_one')
# wait more than 1s and one should be posted
self.advance_time_and_run(1)
self.assertEventCalled('custom_release_one')
# release the other switch, inactive should not be posted yet
self.release_switch_and_run('switch1', .1)
self.assertEventNotCalled('custom_release_inactive')
# wait more than 1s for the inactive event
self.advance_time_and_run(1)
self.assertEventCalled('custom_release_inactive')
# start over
self.hit_switch_and_run('switch1', 1)
self.hit_switch_and_run('switch2', 1)
self.mock_event('custom_release_both')
self.mock_event('custom_release_inactive')
self.mock_event('custom_release_one')
# release and reactivate in less than 1s, no new events
self.release_switch_and_run('switch2', .5)
self.hit_switch_and_run('switch2', .1)
self.assertEventNotCalled('custom_release_both')
self.assertEventNotCalled('custom_release_inactive')
self.assertEventNotCalled('custom_release_one')
# make sure no new events after the initial release time passed
self.advance_time_and_run(1)
self.assertEventNotCalled('custom_release_both')
self.assertEventNotCalled('custom_release_inactive')
self.assertEventNotCalled('custom_release_one')
        # release and reactivate both in less than 1s, no new events
self.release_switch_and_run('switch1', .1)
self.release_switch_and_run('switch2', .1)
self.hit_switch_and_run('switch1', .1)
self.hit_switch_and_run('switch2', .1)
self.assertEventNotCalled('custom_release_both')
self.assertEventNotCalled('custom_release_inactive')
self.assertEventNotCalled('custom_release_one')
# make sure no new events
self.advance_time_and_run(1)
self.assertEventNotCalled('custom_release_both')
self.assertEventNotCalled('custom_release_inactive')
self.assertEventNotCalled('custom_release_one')
# now do the whole thing again, with the switches flipped
self.release_switch_and_run('switch2', 2)
self.mock_event('custom_release_both')
self.mock_event('custom_release_inactive')
self.mock_event('custom_release_one')
# both switches should post both
self.hit_switch_and_run('switch2', 1)
self.hit_switch_and_run('switch1', .1)
self.assertEventCalled('custom_release_both')
self.assertEventNotCalled('custom_release_inactive')
self.assertEventNotCalled('custom_release_one')
# release 1, should not post one because it's less than 1s
self.release_switch_and_run('switch1', .1)
self.assertEventNotCalled('custom_release_inactive')
self.assertEventNotCalled('custom_release_one')
# wait more than 1s and one should be posted
self.advance_time_and_run(1)
self.assertEventCalled('custom_release_one')
# release the other switch, inactive should not be posted yet
self.release_switch_and_run('switch2', .1)
self.assertEventNotCalled('custom_release_inactive')
# wait more than 1s for the inactive event
self.advance_time_and_run(1)
self.assertEventCalled('custom_release_inactive')
# start over
self.hit_switch_and_run('switch2', 1)
self.hit_switch_and_run('switch1', .1)
self.mock_event('custom_release_both')
self.mock_event('custom_release_inactive')
self.mock_event('custom_release_one')
# release and reactivate in less than 1s, no new events
self.release_switch_and_run('switch1', .5)
self.hit_switch_and_run('switch1', .1)
self.assertEventNotCalled('custom_release_both')
self.assertEventNotCalled('custom_release_inactive')
self.assertEventNotCalled('custom_release_one')
def test_custom_times_multiple_switches(self):
# this is a sort of catch all with all three types of custom times,
# but with multiple switches
# time is >1s from the first switch
self._reset_custom_times_multiple_switches()
self.hit_switch_and_run('switch5', .5)
self.hit_switch_and_run('switch6', .6)
self.hit_switch_and_run('switch7', .1)
self.assertEventNotCalled('custom_times_multiple_switches_both')
# time is <1s from first switch
self._reset_custom_times_multiple_switches()
self.hit_switch_and_run('switch5', .5)
self.hit_switch_and_run('switch6', .1)
self.hit_switch_and_run('switch7', .1)
self.assertEventNotCalled('custom_times_multiple_switches_both')
# there's a 1s hold time
self.advance_time_and_run(1.1)
self.assertEventCalled('custom_times_multiple_switches_both')
# release switch7, one event should post after 1s
self.release_switch_and_run('switch7', .1)
self.assertEventNotCalled('custom_times_multiple_switches_one')
self.advance_time_and_run(1)
self.assertEventCalled('custom_times_multiple_switches_one')
# test hold time
self._reset_custom_times_multiple_switches()
self.hit_switch_and_run('switch5', .1)
self.hit_switch_and_run('switch6', .1)
self.hit_switch_and_run('switch7', .1)
self.assertEventNotCalled('custom_times_multiple_switches_both')
self.advance_time_and_run(1)
self.assertEventCalled('custom_times_multiple_switches_both')
def _reset_custom_times_multiple_switches(self):
self.release_switch_and_run('switch5', .1)
self.release_switch_and_run('switch6', .1)
self.release_switch_and_run('switch7', .1)
self.release_switch_and_run('switch8', .1)
self.advance_time_and_run(2)
self.mock_event('custom_times_multiple_switches_both')
self.mock_event('custom_times_multiple_switches_inactive')
self.mock_event('custom_times_multiple_switches_one')
def test_custom_events(self):
self.mock_event('custom_events_both')
self.mock_event('custom_events_inactive')
self.mock_event('custom_events_one')
self.mock_event('active_event')
self.mock_event('active_event2')
self.mock_event('inactive_event')
self.mock_event('one_event')
self.hit_switch_and_run('switch1', .1)
self.hit_switch_and_run('switch2', .1)
self.release_switch_and_run('switch1', .1)
self.release_switch_and_run('switch2', .1)
self.assertEventNotCalled('custom_events_both')
self.assertEventNotCalled('custom_events_inactive')
self.assertEventNotCalled('custom_events_one')
self.assertEventCalled('active_event')
self.assertEventCalled('active_event2')
self.assertEventCalled('inactive_event')
self.assertEventCalled('one_event')
def test_combo_switches_in_mode(self):
self.mock_event('mode1_combo_both')
self.mock_event('mode1_combo_inactive')
self.mock_event('mode1_combo_one')
self.hit_switch_and_run('switch1', .1)
self.hit_switch_and_run('switch2', .1)
self.release_switch_and_run('switch1', .1)
self.release_switch_and_run('switch2', .1)
self.assertEventNotCalled('mode1_combo_both')
self.assertEventNotCalled('mode1_combo_inactive')
self.assertEventNotCalled('mode1_combo_one')
self.advance_time_and_run(5)
self.machine.modes.mode1.start()
self.advance_time_and_run()
self.hit_switch_and_run('switch1', .1)
self.hit_switch_and_run('switch2', .1)
self.release_switch_and_run('switch1', .1)
self.release_switch_and_run('switch2', .1)
self.assertEventCalled('mode1_combo_both')
self.assertEventCalled('mode1_combo_inactive')
self.assertEventCalled('mode1_combo_one')
self.machine.modes.mode1.stop()
self.advance_time_and_run()
self.mock_event('mode1_combo_both')
self.mock_event('mode1_combo_inactive')
self.mock_event('mode1_combo_one')
self.hit_switch_and_run('switch1', .1)
self.hit_switch_and_run('switch2', .1)
self.release_switch_and_run('switch1', .1)
self.release_switch_and_run('switch2', .1)
self.assertEventNotCalled('mode1_combo_both')
self.assertEventNotCalled('mode1_combo_inactive')
self.assertEventNotCalled('mode1_combo_one')
def test_built_in_combos(self):
self.mock_event('flipper_cancel')
self.hit_switch_and_run('switch9', .1)
self.hit_switch_and_run('switch10', .1)
self.assertEventCalled('flipper_cancel')
# make sure it works with long times too
self.release_switch_and_run('switch9', .1)
self.release_switch_and_run('switch10', .1)
self.mock_event('flipper_cancel')
self.hit_switch_and_run('switch9', 10)
self.hit_switch_and_run('switch10', .1)
self.assertEventCalled('flipper_cancel')
| 41.06518
| 81
| 0.709745
| 2,868
| 23,941
| 5.543236
| 0.051604
| 0.191722
| 0.079255
| 0.059379
| 0.890993
| 0.86816
| 0.837024
| 0.820229
| 0.8118
| 0.802491
| 0
| 0.015922
| 0.197235
| 23,941
| 582
| 82
| 41.135739
| 0.811281
| 0.093814
| 0
| 0.812808
| 0
| 0
| 0.282493
| 0.136765
| 0
| 0
| 0
| 0
| 0.448276
| 1
| 0.03202
| false
| 0
| 0.004926
| 0.004926
| 0.044335
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
db31daae094f3fa4325b1505619d3fc097118e48
| 1,445
|
py
|
Python
|
gabriel_lego/lego_engine/tasks/task_generated_20.py
|
molguin92/gabriel-lego-py3
|
2f8828326ca025997687a19d1af80bc1590a9290
|
[
"Apache-2.0"
] | null | null | null |
gabriel_lego/lego_engine/tasks/task_generated_20.py
|
molguin92/gabriel-lego-py3
|
2f8828326ca025997687a19d1af80bc1590a9290
|
[
"Apache-2.0"
] | 1
|
2019-09-10T23:41:41.000Z
|
2019-09-11T20:21:11.000Z
|
gabriel_lego/lego_engine/tasks/task_generated_20.py
|
molguin92/gabriel-lego-py3
|
2f8828326ca025997687a19d1af80bc1590a9290
|
[
"Apache-2.0"
] | 1
|
2022-02-22T15:29:27.000Z
|
2022-02-22T15:29:27.000Z
|
from numpy import array
# Automatically generated task with 20 steps
# Labels: nothing:0, white:1, green:2, yellow:3, red:4, blue:5, black:6,
# unsure:7
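# Each array below is one step of the 20-step task; cell values use the
# color labels listed above.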
bitmaps = \
[array([[4, 4, 4, 4, 4, 4]]),
array([[0, 0, 3, 3, 0, 0],
[4, 4, 4, 4, 4, 4]]),
array([[0, 0, 3, 3, 0, 5],
[4, 4, 4, 4, 4, 4]]),
array([[3, 0, 3, 3, 0, 5],
[4, 4, 4, 4, 4, 4]]),
array([[3, 0, 3, 3, 2, 5],
[4, 4, 4, 4, 4, 4]]),
array([[3, 2, 3, 3, 2, 5],
[4, 4, 4, 4, 4, 4]]),
array([[3, 0, 3, 3, 2, 5],
[4, 4, 4, 4, 4, 4]]),
array([[3, 0, 3, 3, 0, 5],
[4, 4, 4, 4, 4, 4]]),
array([[0, 0, 3, 3, 0, 5],
[4, 4, 4, 4, 4, 4]]),
array([[0, 0, 3, 3, 0, 0],
[4, 4, 4, 4, 4, 4]]),
array([[4, 4, 4, 4, 4, 4]]),
array([[0, 3, 3, 3, 3, 0],
[4, 4, 4, 4, 4, 4]]),
array([[0, 3, 3, 3, 3, 4],
[4, 4, 4, 4, 4, 4]]),
array([[3, 3, 3, 3, 3, 4],
[4, 4, 4, 4, 4, 4]]),
array([[0, 0, 0, 3, 3, 0],
[3, 3, 3, 3, 3, 4],
[4, 4, 4, 4, 4, 4]]),
array([[0, 0, 0, 3, 3, 3],
[3, 3, 3, 3, 3, 4],
[4, 4, 4, 4, 4, 4]]),
array([[0, 2, 0, 3, 3, 3],
[3, 3, 3, 3, 3, 4],
[4, 4, 4, 4, 4, 4]]),
array([[0, 2, 2, 3, 3, 3],
[3, 3, 3, 3, 3, 4],
[4, 4, 4, 4, 4, 4]]),
array([[3, 2, 2, 3, 3, 3],
[3, 3, 3, 3, 3, 4],
[4, 4, 4, 4, 4, 4]]),
array([[4, 4, 4, 4, 0, 0],
[3, 2, 2, 3, 3, 3],
[3, 3, 3, 3, 3, 4],
[4, 4, 4, 4, 4, 4]])]
| 25.803571
| 72
| 0.338408
| 318
| 1,445
| 1.537736
| 0.08805
| 0.453988
| 0.552147
| 0.564417
| 0.752556
| 0.752556
| 0.750511
| 0.750511
| 0.750511
| 0.736196
| 0
| 0.294427
| 0.341869
| 1,445
| 55
| 73
| 26.272727
| 0.219769
| 0.084429
| 0
| 0.659574
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.021277
| 0
| 0.021277
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
c0187b38bf55f9cb8055d2732e7cddaa31d6080b
| 10,751
|
py
|
Python
|
tests/test_bitshift_opcodes.py
|
bezzy199991/emupy6502
|
d4efe413c28e43e313f52a12e646eee8b52c3205
|
[
"MIT"
] | null | null | null |
tests/test_bitshift_opcodes.py
|
bezzy199991/emupy6502
|
d4efe413c28e43e313f52a12e646eee8b52c3205
|
[
"MIT"
] | 2
|
2019-10-31T11:56:28.000Z
|
2019-10-31T15:49:09.000Z
|
tests/test_bitshift_opcodes.py
|
bezzy199991/emupy6502
|
d4efe413c28e43e313f52a12e646eee8b52c3205
|
[
"MIT"
] | 1
|
2019-10-31T10:21:34.000Z
|
2019-10-31T10:21:34.000Z
|
import unittest
import pytest
from unittest.mock import patch, Mock
from emupy6502.memory_controller import MemoryController
from emupy6502.registers import Registers
from emupy6502.opcodes import OpCode
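# Tests for the 6502 ASL (arithmetic shift left) and ROL (rotate left)
# opcodes across accumulator, zero page, zero page,X and absolute modes.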
@pytest.fixture
def registers():
return Registers()
@pytest.fixture
def opcode():
return OpCode()
@pytest.fixture
def mock_memory_controller():
return Mock()
def test_execute_asl_accumulator_positive(opcode, registers, mock_memory_controller):
registers.accumulator = 3
    # we're mocking 0x0A (accumulator mode, so no operand byte is read)
registers.pc += 1 #need to fake the cpu reading the opcode
count = opcode.execute(0x0A, registers, mock_memory_controller)
assert count == 2
mock_memory_controller.read.assert_not_called()
assert registers.pc == 1
assert registers.accumulator == 6
assert registers.zero_flag == False
assert registers.carry_flag == False
assert registers.negative_flag == False
def test_execute_asl_accumulator_negative(opcode, registers, mock_memory_controller):
registers.accumulator = -3
    # we're mocking 0x0A (accumulator mode, so no operand byte is read)
registers.pc += 1 #need to fake the cpu reading the opcode
count = opcode.execute(0x0A, registers, mock_memory_controller)
assert count == 2
mock_memory_controller.read.assert_not_called()
assert registers.pc == 1
assert registers.accumulator == 0xfa
assert registers.zero_flag == False
assert registers.carry_flag
assert registers.negative_flag
def test_execute_asl_zeropage(opcode, registers, mock_memory_controller):
# we're mocking 0x06 0x30
mock_memory_controller.read.side_effect = [0x30, 0x20]
registers.pc += 1 #need to fake the cpu reading the opcode
count = opcode.execute(0x06, registers, mock_memory_controller)
assert count == 5
assert mock_memory_controller.read.call_count == 2
assert mock_memory_controller.read.call_args_list[0] == unittest.mock.call(1)
mock_memory_controller.write.assert_called_with(0x30, 0x40)
assert registers.pc == 2
assert registers.zero_flag == False
assert registers.carry_flag == False
assert registers.negative_flag == False
def test_execute_asl_zeropage_x(opcode, registers, mock_memory_controller):
registers.x_index = 3
# we're mocking 0x16 0x21 so store to [0x0024]
mock_memory_controller.read.side_effect = [0x21, 0x10]
registers.pc += 1 #need to fake the cpu reading the opcode
count = opcode.execute(0x16, registers, mock_memory_controller)
assert count == 6
# these are checked more thoroughly in addressing_modes_tests
assert mock_memory_controller.read.call_count == 2
assert mock_memory_controller.read.call_args_list[0] == unittest.mock.call(1)
mock_memory_controller.write.assert_called_with(0x24, 0x20)
assert registers.pc == 2
assert registers.zero_flag == False
assert registers.carry_flag == False
assert registers.negative_flag == False
def test_execute_asl_zeropage_x_wrap(opcode, registers, mock_memory_controller):
registers.x_index = 3
# we're mocking 0x16 0x21 so store to [0x0024]
mock_memory_controller.read.side_effect = [0xfe, 0xf0]
registers.pc += 1 #need to fake the cpu reading the opcode
count = opcode.execute(0x16, registers, mock_memory_controller)
assert count == 6
# these are checked more thoroughly in addressing_modes_tests
assert mock_memory_controller.read.call_count == 2
assert mock_memory_controller.read.call_args_list[0] == unittest.mock.call(1)
mock_memory_controller.write.assert_called_with(0x01, 0xe0)
assert registers.pc == 2
assert registers.zero_flag == False
assert registers.carry_flag
assert registers.negative_flag
def test_execute_asl_absolute(opcode, registers, mock_memory_controller):
registers.accumulator = 0x20
# we're mocking 0x0E 0x0 0x20 so store to [0x2000]
mock_memory_controller.read.side_effect = [0, 0x20, 0x21]
registers.pc += 1 #need to fake the cpu reading the opcode
count = opcode.execute(0x0E, registers, mock_memory_controller)
assert count == 6
# these are checked more thoroughly in addressing_modes_tests
assert mock_memory_controller.read.call_count == 3
assert mock_memory_controller.read.call_args_list[0] == unittest.mock.call(1)
mock_memory_controller.write.assert_called_with(0x2000, 0x42)
assert registers.pc == 3
assert registers.zero_flag == False
assert registers.carry_flag == False
assert registers.negative_flag == False
def test_execute_asl_absolute_x(opcode, registers, mock_memory_controller):
registers.x_index = 3
# we're mocking 0x1E 0x2100 so write is to [0x2103]
mock_memory_controller.read.side_effect = [0, 0x21, 0xfe]
registers.pc += 1 #need to fake the cpu reading the opcode
count = opcode.execute(0x1E, registers, mock_memory_controller)
assert count == 7
# these are checked more thoroughly in addressing_modes_tests
assert mock_memory_controller.read.call_count == 3
mock_memory_controller.write.assert_called_with(0x2103, 0xfc)
assert registers.pc == 3
assert registers.zero_flag == False
assert registers.carry_flag
assert registers.negative_flag
def test_execute_rol_accumulator_carry_clear_sign_clear(opcode, registers, mock_memory_controller):
registers.accumulator = 3
# we're mocking 0x2A
registers.pc += 1 #need to fake the cpu reading the opcode
count = opcode.execute(0x2A, registers, mock_memory_controller)
assert count == 2
mock_memory_controller.read.assert_not_called()
assert registers.pc == 1
assert registers.accumulator == 6
assert registers.zero_flag == False
assert registers.carry_flag == False
assert registers.negative_flag == False
def test_execute_rol_accumulator_carry_set_sign_clear(opcode, registers, mock_memory_controller):
registers.accumulator = 3
registers.carry_flag = True
# we're mocking 0x2A
registers.pc += 1 #need to fake the cpu reading the opcode
count = opcode.execute(0x2A, registers, mock_memory_controller)
assert count == 2
mock_memory_controller.read.assert_not_called()
assert registers.pc == 1
assert registers.accumulator == 7
assert registers.zero_flag == False
assert registers.carry_flag == False
assert registers.negative_flag == False
def test_execute_rol_accumulator_carry_clear_sign_set(opcode, registers, mock_memory_controller):
registers.accumulator = 0xc0
# we're mocking 0x2A
registers.pc += 1 #need to fake the cpu reading the opcode
count = opcode.execute(0x2A, registers, mock_memory_controller)
assert count == 2
mock_memory_controller.read.assert_not_called()
assert registers.pc == 1
assert registers.accumulator == 0x80
assert registers.zero_flag == False
assert registers.carry_flag
assert registers.negative_flag
def test_execute_rol_zeropage_carry_clear_sign_clear(opcode, registers, mock_memory_controller):
mock_memory_controller.read.side_effect = [0x30, 3]
# we're mocking 0x26 0x30 and [0x30] = 3
registers.pc += 1 #need to fake the cpu reading the opcode
count = opcode.execute(0x26, registers, mock_memory_controller)
assert count == 5
assert mock_memory_controller.read.call_count == 2
mock_memory_controller.write.assert_called_with(0x30, 6)
assert registers.pc == 2
assert registers.zero_flag == False
assert registers.carry_flag == False
assert registers.negative_flag == False
def test_execute_rol_zeropage_carry_set_sign_clear(opcode, registers, mock_memory_controller):
registers.carry_flag = True
mock_memory_controller.read.side_effect = [0x30, 3]
# we're mocking 0x26 0x30 and [0x30] = 3
registers.pc += 1 #need to fake the cpu reading the opcode
count = opcode.execute(0x26, registers, mock_memory_controller)
assert count == 5
assert mock_memory_controller.read.call_count == 2
mock_memory_controller.write.assert_called_with(0x30, 7)
assert registers.pc == 2
assert registers.zero_flag == False
assert registers.carry_flag == False
assert registers.negative_flag == False
def test_execute_rol_zeropage_carry_clear_sign_set(opcode, registers, mock_memory_controller):
registers.accumulator = 0xc0
mock_memory_controller.read.side_effect = [0x30, 0xc0]
# we're mocking 0x26 0x30 and [0x30] = 0xc0
registers.pc += 1 #need to fake the cpu reading the opcode
count = opcode.execute(0x26, registers, mock_memory_controller)
assert count == 5
mock_memory_controller.write.assert_called_with(0x30, 0x80)
assert registers.pc == 2
assert registers.zero_flag == False
assert registers.carry_flag
assert registers.negative_flag
def test_execute_rol_absolute_carry_clear_sign_clear(opcode, registers, mock_memory_controller):
mock_memory_controller.read.side_effect = [0x00, 0x30, 3]
# we're mocking 0x2E 0x30 and [0x3000] = 3
registers.pc += 1 #need to fake the cpu reading the opcode
count = opcode.execute(0x2E, registers, mock_memory_controller)
assert count == 6
assert mock_memory_controller.read.call_count == 3
mock_memory_controller.write.assert_called_with(0x3000, 6)
assert registers.pc == 3
assert registers.zero_flag == False
assert registers.carry_flag == False
assert registers.negative_flag == False
def test_execute_rol_absolute_carry_set_sign_clear(opcode, registers, mock_memory_controller):
registers.carry_flag = True
mock_memory_controller.read.side_effect = [0x00, 0x30, 3]
# we're mocking 0x2E 0x30 and [0x3000] = 3
registers.pc += 1 #need to fake the cpu reading the opcode
count = opcode.execute(0x2E, registers, mock_memory_controller)
assert count == 6
assert mock_memory_controller.read.call_count == 3
mock_memory_controller.write.assert_called_with(0x3000, 7)
assert registers.pc == 3
assert registers.zero_flag == False
assert registers.carry_flag == False
assert registers.negative_flag == False
def test_execute_rol_absolute_carry_clear_sign_set(opcode, registers, mock_memory_controller):
registers.accumulator = 0xc0
mock_memory_controller.read.side_effect = [0x00, 0x30, 0xc0]
    # we're mocking 0x2E 0x30 and [0x3000] = 0xc0
registers.pc += 1 #need to fake the cpu reading the opcode
count = opcode.execute(0x2E, registers, mock_memory_controller)
assert count == 6
assert mock_memory_controller.read.call_count == 3
mock_memory_controller.write.assert_called_with(0x3000, 0x80)
assert registers.pc == 3
assert registers.zero_flag == False
assert registers.carry_flag
assert registers.negative_flag
| 38.53405
| 99
| 0.750721
| 1,468
| 10,751
| 5.251362
| 0.073569
| 0.155662
| 0.191983
| 0.120379
| 0.930471
| 0.916721
| 0.911532
| 0.886496
| 0.8756
| 0.874303
| 0
| 0.044875
| 0.175053
| 10,751
| 279
| 100
| 38.53405
| 0.824332
| 0.132639
| 0
| 0.729064
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027344
| 0
| 0.566502
| 1
| 0.093596
| false
| 0
| 0.029557
| 0.014778
| 0.137931
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
c01ef6ef5979b7e4b480fbd7a4c7a0ecce6cfd9c
| 399
|
py
|
Python
|
art/estimators/object_detection/__init__.py
|
monshri/adversarial-robustness-toolbox
|
6465240cb6a71bc376dae52459a7133e403df8d2
|
[
"MIT"
] | 1,350
|
2020-07-14T08:06:55.000Z
|
2022-03-31T19:22:25.000Z
|
art/estimators/object_detection/__init__.py
|
monshri/adversarial-robustness-toolbox
|
6465240cb6a71bc376dae52459a7133e403df8d2
|
[
"MIT"
] | 936
|
2020-07-14T03:33:00.000Z
|
2022-03-31T23:05:29.000Z
|
art/estimators/object_detection/__init__.py
|
monshri/adversarial-robustness-toolbox
|
6465240cb6a71bc376dae52459a7133e403df8d2
|
[
"MIT"
] | 413
|
2020-07-16T16:00:16.000Z
|
2022-03-29T10:31:12.000Z
|
"""
Module containing estimators for object detection.
"""
from art.estimators.object_detection.object_detector import ObjectDetectorMixin
from art.estimators.object_detection.python_object_detector import PyTorchObjectDetector
from art.estimators.object_detection.pytorch_faster_rcnn import PyTorchFasterRCNN
from art.estimators.object_detection.tensorflow_faster_rcnn import TensorFlowFasterRCNN
| 44.333333
| 88
| 0.889724
| 45
| 399
| 7.644444
| 0.422222
| 0.218023
| 0.197674
| 0.267442
| 0.372093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.062657
| 399
| 8
| 89
| 49.875
| 0.919786
| 0.125313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|