The table's 113 columns and dtypes (each row describes one Python source file together with repository metadata and precomputed quality signals):

```
hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
```
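A minimal sketch of how a table with this schema might be inspected, assuming the rows are available as a Parquet export (the filename below is hypothetical, not part of the dataset):

```python
import pandas as pd

# Hypothetical filename; any Parquet/JSONL export of the rows shown below would do.
df = pd.read_parquet("code_quality_signals.parquet")

# Shape and dtypes should match the 113-column schema listed above.
print(df.shape)
print(df.dtypes.head(10))

# Inspect one record's metadata without dumping the full file contents.
row = df.iloc[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])

# Filter on the precomputed signals, e.g. drop files flagged as auto-generated
# and keep those with a moderate average line length.
keep = (df["qsc_code_cate_autogen_quality_signal"] == 0) & (df["avg_line_length"] < 100)
print(keep.mean())
```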
**Row 1**

- hexsha: `59d5c57a72fded593d3a1206cea6cb5885b7836f` | size: 17,436 | ext: `py` | lang: Python
- path: `test/testsArithmeticExpressions/testPower.py`
- repo: `mouton5000/DiscreteEventApplicationEditor` @ `4a4272fd9b0a7f3f228fee1e9e7b351e4a21cd33` | licenses: `["MIT"]` (path, repo, and commit are identical for the max-stars, max-issues, and max-forks views)
- max_stars_count / max_issues_count / max_forks_count: null / null / null (no event datetimes recorded)
- content:

```python
__author__ = 'mouton'
from triggerExpressions import Evaluation
from unittest import TestCase
from math import pi, sqrt
from arithmeticExpressions import ALitteral, Power, UndefinedLitteral, SelfLitteral
from database import Variable
class TestPower(TestCase):
@classmethod
def setUpClass(cls):
import grammar.grammars
grammar.grammars.compileGrammars()
def setUp(self):
self.eval1 = Evaluation()
self.eval2 = Evaluation()
self.eval2[Variable('X')] = 1
self.eval2[Variable('T')] = 'abc'
self.eval2[Variable('Z')] = 12.0
def test_integers_power_with_empty_evaluation(self):
a1 = ALitteral(10)
a2 = ALitteral(20)
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval1), 10 ** 20)
def test_integers_power_with_non_empty_evaluation(self):
a1 = ALitteral(10)
a2 = ALitteral(20)
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval2), 10 ** 20)
def test_strings_power_with_empty_evaluation(self):
a1 = ALitteral('abc')
a2 = ALitteral('def')
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval1)
def test_strings_power_with_non_empty_evaluation(self):
a1 = ALitteral('abc')
a2 = ALitteral('def')
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2)
def test_floats_power_with_empty_evaluation(self):
a1 = ALitteral(pi)
a2 = ALitteral(sqrt(2))
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval1), pi ** sqrt(2))
def test_floats_power_with_non_empty_evaluation(self):
a1 = ALitteral(pi)
a2 = ALitteral(sqrt(2))
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval2), pi ** sqrt(2))
def test_integer_string_power_with_empty_evaluation(self):
a1 = ALitteral(10)
a2 = ALitteral('def')
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval1)
def test_integer_string_power_with_non_empty_evaluation(self):
a1 = ALitteral(10)
a2 = ALitteral('def')
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2)
def test_string_integer_power_with_empty_evaluation(self):
a1 = ALitteral('abc')
a2 = ALitteral(20)
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval1)
def test_string_integer_power_with_non_empty_evaluation(self):
a1 = ALitteral('abc')
a2 = ALitteral(20)
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2)
def test_integer_float_power_with_empty_evaluation(self):
a1 = ALitteral(10)
a2 = ALitteral(sqrt(2))
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval1), 10 ** sqrt(2))
def test_integer_float_power_with_non_empty_evaluation(self):
a1 = ALitteral(10)
a2 = ALitteral(sqrt(2))
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval2), 10 ** sqrt(2))
def test_float_integer_power_with_empty_evaluation(self):
a1 = ALitteral(pi)
a2 = ALitteral(20)
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval1), pi ** 20)
def test_float_integer_power_with_non_empty_evaluation(self):
a1 = ALitteral(pi)
a2 = ALitteral(20)
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval2), pi ** 20)
def test_string_float_power_with_empty_evaluation(self):
a1 = ALitteral('abc')
a2 = ALitteral(sqrt(2))
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval1)
def test_string_float_power_with_non_empty_evaluation(self):
a1 = ALitteral('abc')
a2 = ALitteral(sqrt(2))
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2)
def test_float_string_power_with_empty_evaluation(self):
a1 = ALitteral(pi)
a2 = ALitteral('def')
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval1)
def test_float_string_power_with_non_empty_evaluation(self):
a1 = ALitteral(pi)
a2 = ALitteral('def')
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2)
def test_integer_undefined_power_with_empty_evaluation(self):
a1 = ALitteral(10)
a2 = UndefinedLitteral()
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval1)
def test_integer_undefined_power_with_non_empty_evaluation(self):
a1 = ALitteral(10)
a2 = UndefinedLitteral()
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2)
def test_undefined_integer_power_with_empty_evaluation(self):
a1 = UndefinedLitteral()
a2 = ALitteral(20)
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval1)
def test_undefined_integer_power_with_non_empty_evaluation(self):
a1 = UndefinedLitteral()
a2 = ALitteral(20)
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2)
def test_string_undefined_power_with_empty_evaluation(self):
a1 = ALitteral('abc')
a2 = UndefinedLitteral()
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval1)
def test_string_undefined_power_with_non_empty_evaluation(self):
a1 = ALitteral('abc')
a2 = UndefinedLitteral()
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2)
def test_undefined_string_power_with_empty_evaluation(self):
a1 = UndefinedLitteral()
a2 = ALitteral('def')
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval1)
def test_undefined_string_power_with_non_empty_evaluation(self):
a1 = UndefinedLitteral()
a2 = ALitteral('def')
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2)
def test_float_undefined_power_with_empty_evaluation(self):
a1 = ALitteral(pi)
a2 = UndefinedLitteral()
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval1)
def test_float_undefined_power_with_non_empty_evaluation(self):
a1 = ALitteral(pi)
a2 = UndefinedLitteral()
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2)
def test_undefined_float_power_with_empty_evaluation(self):
a1 = UndefinedLitteral()
a2 = ALitteral(sqrt(2))
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval1)
def test_undefined_float_power_with_non_empty_evaluation(self):
a1 = UndefinedLitteral()
a2 = ALitteral(sqrt(2))
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2)
def test_undefined_undefined_power_with_empty_evaluation(self):
a1 = UndefinedLitteral()
a2 = UndefinedLitteral()
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval1)
def test_undefined_undefined_power_with_non_empty_evaluation(self):
a1 = UndefinedLitteral()
a2 = UndefinedLitteral()
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2)
def test_integer_evaluated_variable_power(self):
a1 = ALitteral(10)
a2 = ALitteral(Variable('X'))
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval2), 10)
def test_evaluated_variable_integer_power(self):
a1 = ALitteral(Variable('X'))
a2 = ALitteral(20)
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval2), 1)
def test_string_evaluated_variable_power(self):
a1 = ALitteral('abc')
a2 = ALitteral(Variable('X'))
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2)
def test_evaluated_variable_string_power(self):
a1 = ALitteral(Variable('X'))
a2 = ALitteral('def')
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2)
def test_float_evaluated_variable_power(self):
a1 = ALitteral(pi)
a2 = ALitteral(Variable('X'))
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval2), pi)
def test_evaluated_variable_float_power(self):
a1 = ALitteral(Variable('X'))
a2 = ALitteral(sqrt(2))
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval2), 1)
def test_evaluated_variable_evaluated_variable_power(self):
a1 = ALitteral(Variable('X'))
a2 = ALitteral(Variable('X'))
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval2), 1)
def test_evaluated_variable_undefined_power(self):
a1 = ALitteral(Variable('X'))
a2 = UndefinedLitteral()
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2), 0
def test_undefined_evaluated_variable_power(self):
a1 = UndefinedLitteral()
a2 = ALitteral(Variable('X'))
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2), 0
def test_integer_unevaluated_variable_power(self):
a1 = ALitteral(10)
a2 = ALitteral(Variable('Y'))
expr = Power(a1, a2)
with self.assertRaises(ValueError):
expr.value(self.eval2)
def test_unevaluated_variable_integer_power(self):
a1 = ALitteral(Variable('Y'))
a2 = ALitteral(20)
expr = Power(a1, a2)
with self.assertRaises(ValueError):
expr.value(self.eval2)
def test_string_unevaluated_variable_power(self):
a1 = ALitteral('abc')
a2 = ALitteral(Variable('Y'))
expr = Power(a1, a2)
with self.assertRaises(ValueError):
expr.value(self.eval2)
def test_unevaluated_variable_string_power(self):
a1 = ALitteral(Variable('Y'))
a2 = ALitteral('def')
expr = Power(a1, a2)
with self.assertRaises(ValueError):
expr.value(self.eval2)
def test_float_unevaluated_variable_power(self):
a1 = ALitteral(pi)
a2 = ALitteral(Variable('Y'))
expr = Power(a1, a2)
with self.assertRaises(ValueError):
expr.value(self.eval2)
def test_unevaluated_variable_float_power(self):
a1 = ALitteral(Variable('Y'))
a2 = ALitteral(sqrt(2))
expr = Power(a1, a2)
with self.assertRaises(ValueError):
expr.value(self.eval2)
def test_unevaluated_variable_unevaluated_variable_power(self):
a1 = ALitteral(Variable('Y'))
a2 = ALitteral(Variable('Y'))
expr = Power(a1, a2)
with self.assertRaises(ValueError):
expr.value(self.eval2)
def test_unevaluated_variable_evaluated_variable_power(self):
a1 = ALitteral(Variable('Y'))
a2 = ALitteral(Variable('X'))
expr = Power(a1, a2)
with self.assertRaises(ValueError):
expr.value(self.eval2)
def test_evaluated_variable_unevaluated_variable_power(self):
a1 = ALitteral(Variable('X'))
a2 = ALitteral(Variable('Y'))
expr = Power(a1, a2)
with self.assertRaises(ValueError):
expr.value(self.eval2)
def test_unevaluated_variable_undefined_power(self):
a1 = ALitteral(Variable('Y'))
a2 = UndefinedLitteral()
expr = Power(a1, a2)
with self.assertRaises(ValueError):
expr.value(self.eval2), 0
def test_undefined_unevaluated_variable_power(self):
a1 = UndefinedLitteral()
a2 = ALitteral(Variable('Y'))
expr = Power(a1, a2)
with self.assertRaises(ValueError):
expr.value(self.eval2)
def test_integer_self_litteral_power_with_empty_evaluation(self):
a1 = ALitteral(10)
a2 = SelfLitteral()
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval1, 1), 10)
def test_integer_self_litteral_power_with_non_empty_evaluation(self):
a1 = ALitteral(10)
a2 = SelfLitteral()
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval2, 1), 10)
def test_self_litteral_integer_power_with_empty_evaluation(self):
a1 = SelfLitteral()
a2 = ALitteral(20)
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval1, 1), 1)
def test_self_litteral_integer_power_with_non_empty_evaluation(self):
a1 = SelfLitteral()
a2 = ALitteral(20)
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval2, 1), 1)
def test_string_self_litteral_power_with_empty_evaluation(self):
a1 = ALitteral('abc')
a2 = SelfLitteral()
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval1, 1)
def test_string_self_litteral_power_with_non_empty_evaluation(self):
a1 = ALitteral('abc')
a2 = SelfLitteral()
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2, 1)
def test_self_litteral_string_power_with_empty_evaluation(self):
a1 = SelfLitteral()
a2 = ALitteral('def')
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval1, 1)
def test_self_litteral_string_power_with_non_empty_evaluation(self):
a1 = SelfLitteral()
a2 = ALitteral('def')
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2, 1)
def test_float_self_litteral_power_with_empty_evaluation(self):
a1 = ALitteral(pi)
a2 = SelfLitteral()
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval1, 1), pi)
def test_float_self_litteral_power_with_non_empty_evaluation(self):
a1 = ALitteral(pi)
a2 = SelfLitteral()
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval2, 1), pi)
def test_self_litteral_float_power_with_empty_evaluation(self):
a1 = SelfLitteral()
a2 = ALitteral(sqrt(2))
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval1, 1), 1)
def test_self_litteral_float_power_with_non_empty_evaluation(self):
a1 = SelfLitteral()
a2 = ALitteral(sqrt(2))
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval2, 1), 1)
def test_self_litteral_self_litteral_power_with_empty_evaluation(self):
a1 = SelfLitteral()
a2 = SelfLitteral()
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval1, 1), 1)
def test_self_litteral_self_litteral_power_with_non_empty_evaluation(self):
a1 = SelfLitteral()
a2 = SelfLitteral()
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval2, 1), 1)
def test_self_litteral_undefined_power_with_empty_evaluation(self):
a1 = SelfLitteral()
a2 = UndefinedLitteral()
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval1, 1)
def test_self_litteral_undefined_power_with_non_empty_evaluation(self):
a1 = SelfLitteral()
a2 = UndefinedLitteral()
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2, 1)
def test_undefined_self_litteral_power_with_empty_evaluation(self):
a1 = UndefinedLitteral()
a2 = SelfLitteral()
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval1, 1)
def test_undefined_self_litteral_power_with_non_empty_evaluation(self):
a1 = UndefinedLitteral()
a2 = SelfLitteral()
expr = Power(a1, a2)
with self.assertRaises(TypeError):
expr.value(self.eval2, 1)
def test_self_litteral_evaluated_variable_power(self):
a1 = ALitteral(Variable('X'))
a2 = SelfLitteral()
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval2, 2), 1)
def test_evaluated_variable_self_litteral_power(self):
a1 = SelfLitteral()
a2 = ALitteral(Variable('X'))
expr = Power(a1, a2)
self.assertEqual(expr.value(self.eval2, 2), 2)
def test_self_litteral_unevaluated_variable_power(self):
a1 = SelfLitteral()
a2 = ALitteral(Variable('Y'))
expr = Power(a1, a2)
with self.assertRaises(ValueError):
expr.value(self.eval2, 1)
def test_unevaluated_variable_self_litteral_power(self):
a1 = ALitteral(Variable('Y'))
a2 = SelfLitteral()
expr = Power(a1, a2)
with self.assertRaises(ValueError):
expr.value(self.eval2, 1)
```

- avg_line_length: 33.856311 | max_line_length: 83 | alphanum_fraction: 0.631796
- qsc_* quality signals (82 values, from `qsc_code_num_words_quality_signal` through `qsc_codepython_frac_lines_print` in the column order above): 2,065 | 17,436 | 5.11816 | 0.030993 | 0.049011 | 0.077018 | 0.091021 | 0.952786 | 0.938972 | 0.923361 | 0.89157 | 0.840666 | 0.790709 | 0 | 0.038958 | 0.262446 | 17,436 | 515 | 84 | 33.856311 | 0.782893 | 0 | 0 | 0.754587 | 0 | 0 | 0.006423 | 0 | 0 | 0 | 0 | 0 | 0.169725 | 1 | 0.174312 | false | 0 | 0.013761 | 0 | 0.190367 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
- effective: 0 | hits: 7
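Several of the simpler signals above can be sanity-checked straight from `content`; for this row, 17,436 characters over 515 lines gives the reported avg_line_length of 17436 / 515 ≈ 33.856311. A small illustrative sketch (one plausible definition of each signal, not the dataset's actual pipeline code):

```python
def basic_stats(content: str) -> dict:
    """Recompute a few of the simple per-file signals (illustrative only)."""
    lines = content.splitlines()
    return {
        "size": len(content.encode("utf-8")),                 # 17,436 for this row
        "num_lines": len(lines),                              # 515
        "max_line_length": max(len(line) for line in lines),  # 83
        "avg_line_length": len(content) / len(lines),         # ~33.856311
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / len(content),
    }
```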
**Row 2**

- hexsha: `c6fd2c4342e94cc248f18cbbe9cb37bd7ae4efa3` | size: 86 | ext: `py` | lang: Python
- path: `tests/data/__init__.py`
- repo: `discohead/jesse` @ `5f025cc72adb33132b75a516f74f96b52ca12af3` | licenses: `["MIT"]` (identical for all three views)
- max_stars_count: 3,999 (events 2018-11-09T10:38:51.000Z → 2022-03-31T12:29:12.000Z)
- max_issues_count: 172 (events 2020-04-16T16:19:08.000Z → 2022-03-28T13:28:55.000Z)
- max_forks_count: 495 (events 2019-03-01T21:48:53.000Z → 2022-03-30T15:35:19.000Z)
- content:

```python
from .test_candles_0 import test_candles_0
from .test_candles_1 import test_candles_1
```

- avg_line_length: 28.666667 | max_line_length: 42 | alphanum_fraction: 0.883721
- qsc_* quality signals (82 values, in the column order above): 16 | 86 | 4.25 | 0.375 | 0.647059 | 0.441176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.051282 | 0.093023 | 86 | 2 | 43 | 43 | 0.820513 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
- effective: 0 | hits: 7
**Row 3**

- hexsha: `0511eada8b850f9ee27a4f47809b0659412ce1d8` | size: 125 | ext: `py` | lang: Python
- path: `raster2points/__init__.py`
- repo: `wri/raster2csv` @ `5ce5e10bb02dfd448327e18e98df17c0e8cbd1e5` | licenses: `["MIT"]` (identical for all three views)
- max_stars_count: 7 (events 2019-02-01T18:19:57.000Z → 2021-06-23T04:35:12.000Z)
- max_issues_count: 2 (events 2019-06-24T16:52:24.000Z → 2019-10-25T14:08:05.000Z)
- max_forks_count: 3 (events 2019-06-20T14:08:02.000Z → 2021-06-18T14:00:22.000Z)
- content:

```python
from raster2points.raster2points import raster2df
from raster2points.raster2points import raster2csv
name = "raster2points"
```

- avg_line_length: 25 | max_line_length: 50 | alphanum_fraction: 0.864
- qsc_* quality signals (82 values, in the column order above): 12 | 125 | 9 | 0.5 | 0.314815 | 0.555556 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.061947 | 0.096 | 125 | 4 | 51 | 31.25 | 0.893805 | 0 | 0 | 0 | 0 | 0 | 0.104 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0
- effective: 0 | hits: 7
**Row 4**

- hexsha: `05376ba5ad44537c72a5b95f2904d5a9415c3f0e` | size: 9,461 | ext: `py` | lang: Python
- path: `squadrons_text_map.py`
- repo: `moff-wildfire/squadrons_config` @ `824881df7ceb5fda6ecb96656d85860870931901` | licenses: `["Unlicense"]` (identical for all three views)
- max_stars_count / max_issues_count / max_forks_count: null / null / null (no event datetimes recorded)
- content:

```python
kbm_text_map = {
'ConceptMouseRecenter':'Mouse Recenter',
'ConceptContextualInteraction':'Contextual Interaction',
'ConceptPitch_P':'Pitch Up',
'ConceptPitch_N':'Pitch Down',
'ConceptYaw_P':'Yaw Right',
'ConceptYaw_N':'Yaw Left',
'ConceptRoll_P':'Roll Right',
'ConceptRoll_N':'Roll Left',
'ConceptThrottle_P':'Throttle Increase',
'ConceptThrottle_N':'Throttle Decrease',
'ConceptAfterburner':'Boost',
'ConceptDrift':'Drift (While Boosting)',
'ConceptFire':'Fire',
'ConceptFireAuxiliaryWeaponOneMain':'Fire Left Auxiliary',
'ConceptFireAuxiliaryWeaponOneDoubleTap':'Dumb-Fire Left Auxiliary',
'ConceptFireAuxiliaryWeaponTwoMain':'Fire Right Auxiliary',
'ConceptFireAuxiliaryWeaponTwoDoubleTap':'Dumb-Fire Right Auxiliary',
'ConceptFireCountermeasure':'Deploy Countermeasures',
'ConceptScoreboard':'Show Loadout',
'ConceptIncreaseEnginePower':'Increase Engine Power',
'ConceptMaximizeEnginePower':'Maximize Engine Power',
'ConceptIncreaseWeaponPower':'Increase Weapon Power',
'ConceptMaximizeWeaponPower':'Maximize Weapon Power',
'ConceptIncreaseShieldPower':'Increase Shield Power',
'ConceptMaximizeShieldPower':'Maximize Shield Power',
'ConceptResetSystemsPower':'Balance Power',
'ConceptShieldFront':'Focus Shields (Front)',
'ConceptShieldBack':'Focus Shields (Rear)',
'ConceptShieldBalance':'Focus Shields (Balanced)',
'ConceptEmergencyPowerTransferEngine':'Convert Power (Engines)',
'ConceptEmergencyPowerTransferWeapon':'Convert Power (Weapons)',
'ConceptEmergencyPowerTransferBalance':'Convert Power (Balanced)',
'ConceptPowerTransfer':'Focus Shields / Convert Power',
'ConceptPowerTransferMenuSelect':'Shield / Power Menu Select',
'ConceptPowerTransferMenuX_P':'Shield / Power Menu Right',
'ConceptPowerTransferMenuX_N':'Shield / Power Menu Left',
'ConceptPowerTransferMenuY_P':'Shield / Power Menu Up',
'ConceptPowerTransferMenuY_N':'Shield / Power Menu Down',
'ConceptTargetSelect':'Select Target Ahead',
'ConceptTargetCycleNext':'Cycle Targets',
'ConceptTargetHighestThreat':'Target My Attacker',
'ConceptTargetingMenu':'Targeting Wheel',
'ConceptTargetingMenuSelectTargetingMethod':'Targeting Wheel (Toggle Mode) - Select',
'ConceptTargetingMenuX_P':'Targeting Wheel X- Right',
'ConceptTargetingMenuX_N':'Targeting Wheel X - Left',
'ConceptTargetingMenuY_P':'Targeting Wheel Y - Up',
'ConceptTargetingMenuY_N':'Targeting Wheel Y - Down',
'ConceptTargetingMenuCycleAllEnemies':'Targeting Wheel Shortcut - All Enemies',
'ConceptTargetingMenuCycleEnemySquadron':'Targeting Wheel Shortcut - Enemy Squadron',
'ConceptTargetingMenuCycleEnemyAI':'Targeting Wheel Shortcut - Enemy AI',
'ConceptTargetingMenuCycleFlagshipSystems':'Targeting Wheel Shortcut - Flagship Systems',
'ConceptTargetingMenuCycleAllAllies':'Targeting Wheel Shortcut - All Allies',
'ConceptTargetingMenuCycleMySquadron':'Targeting Wheel Shortcut - My Squadron',
'ConceptTargetingMenuCycleTargetAttackers':'Targeting Wheel Shortcut - Target\'s Attackers',
'ConceptTargetingMenuCycleLastAttackers':'Targeting Wheel Shortcut - Last Attackers',
'ConceptTargetingMenuCycleObjectives':'Targeting Wheel Shortcut - Objectives',
'ConceptTargetingMenuCycleMissiles':'Targeting Wheel Shortcut - Missiles',
'ConceptTargetPing':'Ping Target',
'ConceptPingSelf':'Acknowledge Ping',
'ConceptCommMenu':'Comms Wheel',
'ConceptCommMenuSelect':'Comms Wheel (Toggle Mode) - Select',
'ConceptCommMenuX_P':'Comms Wheel - Navigate Right',
'ConceptCommMenuX_N':'Comms Wheel - Navigate Left',
'ConceptCommMenuY_P':'Comms Wheel - Navigate Up',
'ConceptCommMenuY_N':'Comms Wheel - Navigate Down',
'ConceptFreeLookTrigger':'Recalibrate VR',
'ConceptFreeLook':'Free Look',
'ConceptFreeLookCameraUp':'Free Look - Camera Pitch Up',
'ConceptFreeLookCameraDown':'Free Look - Camera Pitch Down',
'ConceptFreeLookCameraLeft':'Free Look - Camera Yaw Left',
'ConceptFreeLookCameraRight':'Free Look - Camera Yaw Right',
'ConceptCameraPitch_P':'Quick Look - Camera Pitch Up',
'ConceptCameraPitch_N':'Quick Look - Camera Pitch Down',
'ConceptCameraYaw_P':'Quick Look - Camera Yaw Left',
'ConceptCameraYaw_N':'Quick Look - Camera Yaw Right',
'ConceptCommMenuHelpMe':'Comms Wheel Shortcut - Help Me',
'ConceptCommMenuCheer':'Comms Wheel Shortcut - Cheer',
'ConceptCommMenuBrag':'Comms Wheel Shortcut - Brag',
'ConceptCommMenuBoo':'Comms Wheel Shortcut - Boo',
'ConceptCommMenuRegroup':'Comms Wheel Shortcut - Regroup',
'ConceptCommMenuBattleCry':'Comms Wheel Shortcut - Battle Cry',
'ConceptCommMenuThank':'Comms Wheel Shortcut - Thank',
'ConceptCommMenuPraise':'Comms Wheel Shortcut - Praise'
}
joystick_text_map = {
'ConceptMouseRecenter':'Mouse Recenter',
'ConceptContextualInteraction':'Contextual Interaction',
'ConceptPitch_P':'Pitch Up',
'ConceptPitch_N':'Pitch Down',
'ConceptYaw_P':'Yaw Right',
'ConceptYaw_N':'Yaw Left',
'ConceptRoll_P':'Roll Right',
'ConceptRoll_N':'Roll Left',
'ConceptThrottle_P':'Throttle Increase',
'ConceptThrottle_N':'Throttle Decrease',
'ConceptAfterburner':'Combo - Boost / Drift',
'ConceptDrift':'Drift',
'ConceptFire':'Fire',
'ConceptFireAuxiliaryWeaponOneMain':'Combo - Left Aux / Dumb-Fire',
'ConceptFireAuxiliaryWeaponOneDoubleTap':'Dumb-Fire Left Auxiliary',
'ConceptFireAuxiliaryWeaponTwoMain':'Combo - Right Aux / Dumb-Fire',
'ConceptFireAuxiliaryWeaponTwoDoubleTap':'Dumb-Fire Right Auxiliary',
'ConceptFireCountermeasure':'Deploy Countermeasures',
'ConceptScoreboard':'Show Loadout',
'ConceptControlEnginePower':'Combo - Power to Engines / Max',
'ConceptIncreaseEnginePower':'Increase Engine Power',
'ConceptMaximizeEnginePower':'Maximize Engine Power',
'ConceptControlWeaponPower':'Combo - Power to Weapons / Max',
'ConceptIncreaseWeaponPower':'Increase Weapon Power',
'ConceptMaximizeWeaponPower':'Maximize Weapon Power',
'ConceptControlShieldPower':'Combo - Power to Shields / Max',
'ConceptIncreaseShieldPower':'Increase Shield Power',
'ConceptMaximizeShieldPower':'Maximize Shield Power',
'ConceptControlBalancePower':'Balance Power',
'ConceptShieldFront':'Focus Shields (Front)',
'ConceptShieldBack':'Focus Shields (Rear)',
'ConceptShieldBalance':'Focus Shields (Balanced)',
'ConceptEmergencyPowerTransferEngine':'Convert Power (Engines)',
'ConceptEmergencyPowerTransferWeapon':'Convert Power (Weapons)',
'ConceptEmergencyPowerTransferBalance':'Convert Power (Balanced)',
'ConceptPowerTransfer':'Focus Shields / Convert Power',
'ConceptPowerTransferMenuSelect':'Shield / Power Menu Select',
'ConceptPowerTransferMenuX_P':'Shield / Power Menu Right',
'ConceptPowerTransferMenuX_N':'Shield / Power Menu Left',
'ConceptPowerTransferMenuY_P':'Shield / Power Menu Up',
'ConceptPowerTransferMenuY_N':'Shield / Power Menu Down',
'ConceptTargeting':'Combo - Select Target Ahead / Targeting Wheel',
'ConceptTargetSelect':'Select Target Ahead',
'ConceptTargetCycle':'Combo - Cycle Targets / Target My Attacker',
'ConceptTargetCycleNext':'Cycle Targets',
'ConceptTargetHighestThreat':'Target My Attacker',
'ConceptTargetingMenu':'Targeting Wheel',
'ConceptTargetingMenuSelectTargetingMethod':'Targeting Wheel (Toggle Mode) - Select',
'ConceptTargetingMenuX_P':'Targeting Wheel X - Right',
'ConceptTargetingMenuX_N':'Targeting Wheel X - Left',
'ConceptTargetingMenuY_P':'Targeting Wheel Y - Up',
'ConceptTargetingMenuY_N':'Targeting Wheel Y - Down',
'ConceptTargetingMenuCycleAllEnemies':'Targeting Wheel Shortcut - All Enemies',
'ConceptTargetingMenuCycleEnemySquadron':'Targeting Wheel Shortcut - Enemy Squadron',
'ConceptTargetingMenuCycleEnemyAI':'Targeting Wheel Shortcut - Enemy AI',
'ConceptTargetingMenuCycleFlagshipSystems':'Targeting Wheel Shortcut - Flagship Systems',
'ConceptTargetingMenuCycleAllAllies':'Targeting Wheel Shortcut - All Allies',
'ConceptTargetingMenuCycleMySquadron':'Targeting Wheel Shortcut - My Squadron',
'ConceptTargetingMenuCycleTargetAttackers':'Targeting Wheel Shortcut - Target\'s Attackers',
'ConceptTargetingMenuCycleLastAttackers':'Targeting Wheel Shortcut - Last Attackers',
'ConceptTargetingMenuCycleObjectives':'Targeting Wheel Shortcut - Objectives',
'ConceptTargetingMenuCycleMissiles':'Targeting Wheel Shortcut - Missiles',
'ConceptCommunication':'Combo - Ping / Ack / Comms Wheel',
'ConceptTargetPing':'Ping Target',
'ConceptPingSelf':'Acknowledge Ping',
'ConceptCommMenu':'Comms Wheel',
'ConceptCommMenuSelect':'Comms Wheel (Toggle Mode) - Select',
'ConceptCommMenuX_P':'Comms Wheel - Navigate Right',
'ConceptCommMenuX_N':'Comms Wheel - Navigate Left',
'ConceptCommMenuY_P':'Comms Wheel - Navigate Up',
'ConceptCommMenuY_N':'Comms Wheel - Navigate Down',
'ConceptFreeLookTrigger':'Recalibrate VR',
'ConceptFreeLook':'Free Look',
'ConceptFreeLookCameraUp':'Free Look - Camera Pitch Up',
'ConceptFreeLookCameraDown':'Free Look - Camera Pitch Down',
'ConceptFreeLookCameraLeft':'Free Look - Camera Yaw Left',
'ConceptFreeLookCameraRight':'Free Look - Camera Yaw Right',
'ConceptCameraPitch_P':'Quick Look - Camera Pitch Up',
'ConceptCameraPitch_N':'Quick Look - Camera Pitch Down',
'ConceptCameraYaw_P':'Quick Look - Camera Yaw Left',
'ConceptCameraYaw_N':'Quick Look - Camera Yaw Right',
'ConceptCommMenuHelpMe':'Comms Wheel Shortcut - Help Me',
'ConceptCommMenuCheer':'Comms Wheel Shortcut - Cheer',
'ConceptCommMenuBrag':'Comms Wheel Shortcut - Brag',
'ConceptCommMenuBoo':'Comms Wheel Shortcut - Boo',
'ConceptCommMenuRegroup':'Comms Wheel Shortcut - Regroup',
'ConceptCommMenuBattleCry':'Comms Wheel Shortcut - Battle Cry',
'ConceptCommMenuThank':'Comms Wheel Shortcut - Thank',
'ConceptCommMenuPraise':'Comms Wheel Shortcut - Praise'
}
```

- avg_line_length: 53.451977 | max_line_length: 92 | alphanum_fraction: 0.79907
- qsc_* quality signals (82 values, in the column order above): 842 | 9,461 | 8.916865 | 0.209026 | 0.062334 | 0.058604 | 0.008524 | 0.91236 | 0.91236 | 0.887853 | 0.887853 | 0.81593 | 0.81593 | 0 | 0 | 0.075891 | 9,461 | 177 | 93 | 53.451977 | 0.858744 | 0 | 0 | 0.886364 | 0 | 0 | 0.865779 | 0.301945 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
- effective: 0 | hits: 8
**Row 5**

- hexsha: `0572264447c40c8026ecaf204d0573a01fc23773` | size: 18 | ext: `py` | lang: Python
- path: `tests/parser/good/multiple-xor.py`
- repo: `Nakrez/RePy` @ `057db55a99eac2c5cb3d622fa1f2e29f6083d8d6` | licenses: `["MIT"]` (identical for all three views)
- max_stars_count: 1 (events 2020-11-24T05:24:26.000Z → 2020-11-24T05:24:26.000Z)
- max_issues_count / max_forks_count: null / null (no event datetimes recorded)
- content:

```python
1 ^ 2 ^ 3 ^ 4 ^ 5
```

- avg_line_length: 9 | max_line_length: 17 | alphanum_fraction: 0.277778
- qsc_* quality signals (82 values, in the column order above): 5 | 18 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.555556 | 0.5 | 18 | 1 | 18 | 18 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0
- effective: 0 | hits: 7
**Row 6**

- hexsha: `55958f57b074a31331279e44d3895f6d9b2f11cf` | size: 7,893 | ext: `py` | lang: Python
- path: `alembic/versions/05a831a5db7b_added_indices_on_created_at.py`
- repo: `notconfusing/CivilServant` @ `f9c7a2cf4de4f6506e37b7c33a7e512b893069c3` | licenses: `["MIT"]` (identical for all three views)
- max_stars_count: 17 (events 2017-03-13T15:14:57.000Z → 2020-01-07T19:12:49.000Z)
- max_issues_count: 32 (events 2016-06-08T03:35:43.000Z → 2016-11-30T18:50:49.000Z)
- max_forks_count: 4 (events 2018-07-11T23:36:28.000Z → 2019-11-16T19:32:33.000Z)
- content:

```python
"""Added indices on created_at
Revision ID: 05a831a5db7b
Revises: a571e57d884a
Create Date: 2017-07-24 23:44:23.301874
"""
# revision identifiers, used by Alembic.
revision = '05a831a5db7b'
down_revision = 'a571e57d884a'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_development():
### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_comments_created_at'), 'comments', ['created_at'], unique=False)
op.create_index(op.f('ix_event_hooks_created_at'), 'event_hooks', ['created_at'], unique=False)
op.create_index(op.f('ix_experiment_actions_created_at'), 'experiment_actions', ['created_at'], unique=False)
op.create_index(op.f('ix_experiment_thing_snapshots_created_at'), 'experiment_thing_snapshots', ['created_at'], unique=False)
op.create_index(op.f('ix_experiment_things_created_at'), 'experiment_things', ['created_at'], unique=False)
op.create_index(op.f('ix_experiments_created_at'), 'experiments', ['created_at'], unique=False)
op.create_index(op.f('ix_front_pages_created_at'), 'front_pages', ['created_at'], unique=False)
op.create_index(op.f('ix_mod_actions_created_at'), 'mod_actions', ['created_at'], unique=False)
op.create_index(op.f('ix_posts_created_at'), 'posts', ['created_at'], unique=False)
op.create_index(op.f('ix_praw_keys_created_at'), 'praw_keys', ['created_at'], unique=False)
op.create_index(op.f('ix_subreddit_pages_created_at'), 'subreddit_pages', ['created_at'], unique=False)
op.create_index(op.f('ix_subreddits_created_at'), 'subreddits', ['created_at'], unique=False)
### end Alembic commands ###
def downgrade_development():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_subreddits_created_at'), table_name='subreddits')
op.drop_index(op.f('ix_subreddit_pages_created_at'), table_name='subreddit_pages')
op.drop_index(op.f('ix_praw_keys_created_at'), table_name='praw_keys')
op.drop_index(op.f('ix_posts_created_at'), table_name='posts')
op.drop_index(op.f('ix_mod_actions_created_at'), table_name='mod_actions')
op.drop_index(op.f('ix_front_pages_created_at'), table_name='front_pages')
op.drop_index(op.f('ix_experiments_created_at'), table_name='experiments')
op.drop_index(op.f('ix_experiment_things_created_at'), table_name='experiment_things')
op.drop_index(op.f('ix_experiment_thing_snapshots_created_at'), table_name='experiment_thing_snapshots')
op.drop_index(op.f('ix_experiment_actions_created_at'), table_name='experiment_actions')
op.drop_index(op.f('ix_event_hooks_created_at'), table_name='event_hooks')
op.drop_index(op.f('ix_comments_created_at'), table_name='comments')
### end Alembic commands ###
def upgrade_test():
### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_comments_created_at'), 'comments', ['created_at'], unique=False)
op.create_index(op.f('ix_event_hooks_created_at'), 'event_hooks', ['created_at'], unique=False)
op.create_index(op.f('ix_experiment_actions_created_at'), 'experiment_actions', ['created_at'], unique=False)
op.create_index(op.f('ix_experiment_thing_snapshots_created_at'), 'experiment_thing_snapshots', ['created_at'], unique=False)
op.create_index(op.f('ix_experiment_things_created_at'), 'experiment_things', ['created_at'], unique=False)
op.create_index(op.f('ix_experiments_created_at'), 'experiments', ['created_at'], unique=False)
op.create_index(op.f('ix_front_pages_created_at'), 'front_pages', ['created_at'], unique=False)
op.create_index(op.f('ix_mod_actions_created_at'), 'mod_actions', ['created_at'], unique=False)
op.create_index(op.f('ix_posts_created_at'), 'posts', ['created_at'], unique=False)
op.create_index(op.f('ix_praw_keys_created_at'), 'praw_keys', ['created_at'], unique=False)
op.create_index(op.f('ix_subreddit_pages_created_at'), 'subreddit_pages', ['created_at'], unique=False)
op.create_index(op.f('ix_subreddits_created_at'), 'subreddits', ['created_at'], unique=False)
### end Alembic commands ###
def downgrade_test():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_subreddits_created_at'), table_name='subreddits')
op.drop_index(op.f('ix_subreddit_pages_created_at'), table_name='subreddit_pages')
op.drop_index(op.f('ix_praw_keys_created_at'), table_name='praw_keys')
op.drop_index(op.f('ix_posts_created_at'), table_name='posts')
op.drop_index(op.f('ix_mod_actions_created_at'), table_name='mod_actions')
op.drop_index(op.f('ix_front_pages_created_at'), table_name='front_pages')
op.drop_index(op.f('ix_experiments_created_at'), table_name='experiments')
op.drop_index(op.f('ix_experiment_things_created_at'), table_name='experiment_things')
op.drop_index(op.f('ix_experiment_thing_snapshots_created_at'), table_name='experiment_thing_snapshots')
op.drop_index(op.f('ix_experiment_actions_created_at'), table_name='experiment_actions')
op.drop_index(op.f('ix_event_hooks_created_at'), table_name='event_hooks')
op.drop_index(op.f('ix_comments_created_at'), table_name='comments')
### end Alembic commands ###
def upgrade_production():
### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_comments_created_at'), 'comments', ['created_at'], unique=False)
op.create_index(op.f('ix_event_hooks_created_at'), 'event_hooks', ['created_at'], unique=False)
op.create_index(op.f('ix_experiment_actions_created_at'), 'experiment_actions', ['created_at'], unique=False)
op.create_index(op.f('ix_experiment_thing_snapshots_created_at'), 'experiment_thing_snapshots', ['created_at'], unique=False)
op.create_index(op.f('ix_experiment_things_created_at'), 'experiment_things', ['created_at'], unique=False)
op.create_index(op.f('ix_experiments_created_at'), 'experiments', ['created_at'], unique=False)
op.create_index(op.f('ix_front_pages_created_at'), 'front_pages', ['created_at'], unique=False)
op.create_index(op.f('ix_mod_actions_created_at'), 'mod_actions', ['created_at'], unique=False)
op.create_index(op.f('ix_posts_created_at'), 'posts', ['created_at'], unique=False)
op.create_index(op.f('ix_praw_keys_created_at'), 'praw_keys', ['created_at'], unique=False)
op.create_index(op.f('ix_subreddit_pages_created_at'), 'subreddit_pages', ['created_at'], unique=False)
op.create_index(op.f('ix_subreddits_created_at'), 'subreddits', ['created_at'], unique=False)
### end Alembic commands ###
def downgrade_production():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_subreddits_created_at'), table_name='subreddits')
op.drop_index(op.f('ix_subreddit_pages_created_at'), table_name='subreddit_pages')
op.drop_index(op.f('ix_praw_keys_created_at'), table_name='praw_keys')
op.drop_index(op.f('ix_posts_created_at'), table_name='posts')
op.drop_index(op.f('ix_mod_actions_created_at'), table_name='mod_actions')
op.drop_index(op.f('ix_front_pages_created_at'), table_name='front_pages')
op.drop_index(op.f('ix_experiments_created_at'), table_name='experiments')
op.drop_index(op.f('ix_experiment_things_created_at'), table_name='experiment_things')
op.drop_index(op.f('ix_experiment_thing_snapshots_created_at'), table_name='experiment_thing_snapshots')
op.drop_index(op.f('ix_experiment_actions_created_at'), table_name='experiment_actions')
op.drop_index(op.f('ix_event_hooks_created_at'), table_name='event_hooks')
op.drop_index(op.f('ix_comments_created_at'), table_name='comments')
### end Alembic commands ###
```

- avg_line_length: 60.251908 | max_line_length: 129 | alphanum_fraction: 0.750538
- qsc_* quality signals (82 values, in the column order above): 1,183 | 7,893 | 4.598478 | 0.061708 | 0.180331 | 0.105882 | 0.132353 | 0.937132 | 0.937132 | 0.937132 | 0.937132 | 0.937132 | 0.927941 | 0 | 0.006986 | 0.093247 | 7,893 | 130 | 130 | 60.715385 | 0.753109 | 0.07665 | 0 | 0.818182 | 0 | 0 | 0.449348 | 0.272273 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.022727 | 0 | 0.113636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
- effective: 0 | hits: 7
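The dupe n-gram fractions for this row sit near 0.94 because the four upgrade/downgrade function bodies are almost verbatim copies of each other. A rough sketch of one plausible definition of `frac_chars_dupe_5grams` (the dataset's exact tokenization and accounting may differ):

```python
from collections import Counter

def frac_chars_dupe_ngrams(text: str, n: int = 5) -> float:
    """Fraction of word characters inside word n-grams that occur more than once
    in the document (illustrative definition, not the pipeline's actual code)."""
    words = text.split()
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    # Mark every word position covered by at least one duplicated n-gram.
    covered = [False] * len(words)
    for i, gram in enumerate(ngrams):
        if counts[gram] > 1:
            for j in range(i, i + n):
                covered[j] = True
    total_chars = sum(len(w) for w in words)
    dupe_chars = sum(len(w) for w, hit in zip(words, covered) if hit)
    return dupe_chars / total_chars if total_chars else 0.0
```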
**Row 7**

- hexsha: `e980be0b26696a1bee1cb7665825312b435565cb` | size: 47 | ext: `py` | lang: Python
- path: `tests/test_slacki.py`
- repo: `erdogant/slacki` @ `a1ce272e05d75251d7649758a6b372e126a8b273` | licenses: `["MIT"]` (identical for all three views)
- max_stars_count / max_issues_count: null / null (no event datetimes recorded)
- max_forks_count: 1 (events 2022-01-05T00:16:47.000Z → 2022-01-05T00:16:47.000Z)
- content:

```python
import slacki as slacki
def test_plot():
pass
```

- avg_line_length: 11.75 | max_line_length: 23 | alphanum_fraction: 0.765957
- qsc_* quality signals (82 values, in the column order above): 8 | 47 | 4.375 | 0.875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.170213 | 47 | 4 | 24 | 11.75 | 0.897436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0
- effective: 0 | hits: 7
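For a file this small, the line-fraction signals are easy to verify by hand: one of the three lines is an `import` and one is a bare `pass`, matching the 0.333333 values reported above for `qsc_codepython_frac_lines_import_quality_signal` and `qsc_codepython_frac_lines_pass_quality_signal`. A quick illustrative check (again one plausible definition, not the pipeline's code):

```python
# The three-line file content from the row above.
content = "import slacki as slacki\ndef test_plot():\n    pass"
lines = content.splitlines()

# Fraction of lines that are imports, and fraction that are bare `pass` statements.
frac_lines_import = sum(l.lstrip().startswith(("import ", "from ")) for l in lines) / len(lines)
frac_lines_pass = sum(l.strip() == "pass" for l in lines) / len(lines)

print(frac_lines_import, frac_lines_pass)  # both 0.3333..., as reported above
```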
**Row 8**

- hexsha: `e9ba1570939bf9697834e7724d000ffdc4c824f6` | size: 29,374 | ext: `py` | lang: Python
- path: `spark_fhir_schemas/stu3/complex_types/consent.py`
- repo: `icanbwell/SparkFhirSchemas` @ `8c828313c39850b65f8676e67f526ee92b7d624e` | licenses: `["Apache-2.0"]` (identical for all three views)
- max_stars_count: 2 (events 2020-10-31T23:25:01.000Z → 2021-06-09T14:12:42.000Z)
- max_issues_count / max_forks_count: null / null (no event datetimes recorded)
- content (the excerpt below is cut off mid-file):

```python
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class ConsentSchema:
"""
A record of a healthcare consumer’s policy choices, which permits or denies
identified recipient(s) or recipient role(s) to perform one or more actions
within a given policy context, for specific purposes and periods of time.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
A record of a healthcare consumer’s policy choices, which permits or denies
identified recipient(s) or recipient role(s) to perform one or more actions
within a given policy context, for specific purposes and periods of time.
id: The logical id of the resource, as used in the URL for the resource. Once
assigned, this value never changes.
extension: May be used to represent additional information that is not part of the basic
definition of the resource. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
meta: The metadata about the resource. This is content that is maintained by the
infrastructure. Changes to the content may not always be associated with
version changes to the resource.
implicitRules: A reference to a set of rules that were followed when the resource was
constructed, and which must be understood when processing the content.
language: The base language in which the resource is written.
text: A human-readable narrative that contains a summary of the resource, and may be
used to represent the content of the resource to a human. The narrative need
not encode all the structured data, but is required to contain sufficient
detail to make it "clinically safe" for a human to just read the narrative.
Resource definitions may define what content should be represented in the
narrative to ensure clinical safety.
contained: These resources do not have an independent existence apart from the resource
that contains them - they cannot be identified independently, and nor can they
have their own independent transaction scope.
resourceType: This is a Consent resource
identifier: Unique identifier for this copy of the Consent Statement.
status: Indicates the current state of this consent.
category: A classification of the type of consents found in the statement. This element
supports indexing and retrieval of consent statements.
patient: The patient/healthcare consumer to whom this consent applies.
period: Relevant time or time-period when this Consent is applicable.
dateTime: When this Consent was issued / created / indexed.
consentingParty: Either the Grantor, which is the entity responsible for granting the rights
listed in a Consent Directive or the Grantee, which is the entity responsible
for complying with the Consent Directive, including any obligations or
limitations on authorizations and enforcement of prohibitions.
actor: Who or what is controlled by this consent. Use group to identify a set of
actors by some property they share (e.g. 'admitting officers').
action: Actions controlled by this consent.
organization: The organization that manages the consent, and the framework within which it
is executed.
sourceAttachment: The source on which this consent statement is based. The source might be a
scanned original paper form, or a reference to a consent that links back to
such a source, a reference to a document repository (e.g. XDS) that stores the
original consent document.
sourceIdentifier: The source on which this consent statement is based. The source might be a
scanned original paper form, or a reference to a consent that links back to
such a source, a reference to a document repository (e.g. XDS) that stores the
original consent document.
sourceReference: The source on which this consent statement is based. The source might be a
scanned original paper form, or a reference to a consent that links back to
such a source, a reference to a document repository (e.g. XDS) that stores the
original consent document.
policy: The references to the policies that are included in this consent scope.
Policies may be organizational, but are often defined jurisdictionally, or in
law.
policyRule: A referece to the specific computable policy.
securityLabel: A set of security labels that define which resources are controlled by this
consent. If more than one label is specified, all resources must have all the
specified labels.
purpose: The context of the activities a user is taking - why the user is accessing the
data - that are controlled by this consent.
dataPeriod: Clinical or Operational Relevant period of time that bounds the data
controlled by this consent.
data: The resources controlled by this consent, if specific resources are
referenced.
except: An exception to the base policy of this consent. An exception can be an
addition or removal of access permissions.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema
from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema
from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema
from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema
from spark_fhir_schemas.stu3.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.stu3.complex_types.period import PeriodSchema
from spark_fhir_schemas.stu3.complex_types.consent_actor import (
Consent_ActorSchema,
)
from spark_fhir_schemas.stu3.complex_types.attachment import AttachmentSchema
from spark_fhir_schemas.stu3.complex_types.consent_policy import (
Consent_PolicySchema,
)
from spark_fhir_schemas.stu3.complex_types.coding import CodingSchema
from spark_fhir_schemas.stu3.complex_types.consent_data import (
Consent_DataSchema,
)
from spark_fhir_schemas.stu3.complex_types.consent_except import (
Consent_ExceptSchema,
)
if (
max_recursion_limit and nesting_list.count("Consent") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["Consent"]
schema = StructType(
[
# The logical id of the resource, as used in the URL for the resource. Once
# assigned, this value never changes.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the resource. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The metadata about the resource. This is content that is maintained by the
# infrastructure. Changes to the content may not always be associated with
# version changes to the resource.
StructField(
"meta",
MetaSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# A reference to a set of rules that were followed when the resource was
# constructed, and which must be understood when processing the content.
StructField("implicitRules", StringType(), True),
# The base language in which the resource is written.
StructField("language", StringType(), True),
# A human-readable narrative that contains a summary of the resource, and may be
# used to represent the content of the resource to a human. The narrative need
# not encode all the structured data, but is required to contain sufficient
# detail to make it "clinically safe" for a human to just read the narrative.
# Resource definitions may define what content should be represented in the
# narrative to ensure clinical safety.
StructField(
"text",
NarrativeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# These resources do not have an independent existence apart from the resource
# that contains them - they cannot be identified independently, and nor can they
# have their own independent transaction scope.
StructField(
"contained",
ArrayType(
ResourceListSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# This is a Consent resource
StructField("resourceType", StringType(), True),
# Unique identifier for this copy of the Consent Statement.
StructField(
"identifier",
IdentifierSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Indicates the current state of this consent.
StructField("status", StringType(), True),
# A classification of the type of consents found in the statement. This element
# supports indexing and retrieval of consent statements.
StructField(
"category",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The patient/healthcare consumer to whom this consent applies.
StructField(
"patient",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Relevant time or time-period when this Consent is applicable.
StructField(
"period",
PeriodSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# When this Consent was issued / created / indexed.
StructField("dateTime", StringType(), True),
# Either the Grantor, which is the entity responsible for granting the rights
# listed in a Consent Directive or the Grantee, which is the entity responsible
# for complying with the Consent Directive, including any obligations or
# limitations on authorizations and enforcement of prohibitions.
StructField(
"consentingParty",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Who or what is controlled by this consent. Use group to identify a set of
# actors by some property they share (e.g. 'admitting officers').
StructField(
"actor",
ArrayType(
Consent_ActorSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Actions controlled by this consent.
StructField(
"action",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The organization that manages the consent, and the framework within which it
# is executed.
StructField(
"organization",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The source on which this consent statement is based. The source might be a
# scanned original paper form, or a reference to a consent that links back to
# such a source, a reference to a document repository (e.g. XDS) that stores the
# original consent document.
StructField(
"sourceAttachment",
AttachmentSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The source on which this consent statement is based. The source might be a
# scanned original paper form, or a reference to a consent that links back to
# such a source, a reference to a document repository (e.g. XDS) that stores the
# original consent document.
StructField(
"sourceIdentifier",
IdentifierSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The source on which this consent statement is based. The source might be a
# scanned original paper form, or a reference to a consent that links back to
# such a source, a reference to a document repository (e.g. XDS) that stores the
# original consent document.
StructField(
"sourceReference",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The references to the policies that are included in this consent scope.
# Policies may be organizational, but are often defined jurisdictionally, or in
# law.
StructField(
"policy",
ArrayType(
Consent_PolicySchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
                # A reference to the specific computable policy.
StructField("policyRule", StringType(), True),
# A set of security labels that define which resources are controlled by this
# consent. If more than one label is specified, all resources must have all the
# specified labels.
StructField(
"securityLabel",
ArrayType(
CodingSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The context of the activities a user is taking - why the user is accessing the
# data - that are controlled by this consent.
StructField(
"purpose",
ArrayType(
CodingSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Clinical or Operational Relevant period of time that bounds the data
# controlled by this consent.
StructField(
"dataPeriod",
PeriodSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The resources controlled by this consent, if specific resources are
# referenced.
StructField(
"data",
ArrayType(
Consent_DataSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# An exception to the base policy of this consent. An exception can be an
# addition or removal of access permissions.
StructField(
"except",
ArrayType(
Consent_ExceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
return schema
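# A hedged usage sketch (editorial addition, not part of the generated file):
# a generated FHIR schema like this one is typically handed to Spark when
# reading raw Consent JSON, so fields get explicit types instead of relying
# on inference. The class name ConsentSchema, the default-argument call, and
# the input path are assumptions for illustration.
if __name__ == "__main__":
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.appName("consent-schema-demo").getOrCreate()
    consent_schema = ConsentSchema.get_schema()  # assumed: defaults exist for all depth limits
    df = spark.read.schema(consent_schema).json("/tmp/fhir/consent/*.json")  # hypothetical path
    df.select("resourceType", "status", "dateTime").show(5, truncate=False)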
| 50.732297
| 100
| 0.546912
| 2,779
| 29,374
| 5.570349
| 0.127384
| 0.068992
| 0.043605
| 0.065116
| 0.84509
| 0.831395
| 0.831395
| 0.79593
| 0.784755
| 0.766602
| 0
| 0.002894
| 0.411861
| 29,374
| 578
| 101
| 50.820069
| 0.893147
| 0.313883
| 0
| 0.700508
| 1
| 0
| 0.021967
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002538
| false
| 0
| 0.040609
| 0
| 0.050761
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
75b1251469b3a5895fb1c687c35e51af3c48b161
| 124
|
py
|
Python
|
main.py
|
Amazeryogo/surf-exel
|
0d6a43a7ba2b059f61405db846e546308a035733
|
[
"MIT"
] | 3
|
2020-08-12T05:59:47.000Z
|
2020-11-08T00:01:04.000Z
|
main.py
|
Amazeryogo/surf-exel
|
0d6a43a7ba2b059f61405db846e546308a035733
|
[
"MIT"
] | 8
|
2020-08-19T06:24:06.000Z
|
2020-10-27T04:37:46.000Z
|
main.py
|
Amazeryogo/surf-exel
|
0d6a43a7ba2b059f61405db846e546308a035733
|
[
"MIT"
] | 1
|
2020-10-25T13:35:17.000Z
|
2020-10-25T13:35:17.000Z
|
# Minimal launcher script: importing the editor package builds the Tk UI,
# and root.mainloop() hands control to the Tk event loop.
import editor
from editor import root
from editor import *
from editor.settings import *
root.mainloop()
| 13.777778
| 29
| 0.790323
| 17
| 124
| 5.764706
| 0.411765
| 0.367347
| 0.326531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16129
| 124
| 8
| 30
| 15.5
| 0.942308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.833333
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
75bc6d2c8b1918160018a8a2b41167bf9dd58f64
| 17,362
|
py
|
Python
|
Q-score_Analysis/REcount_split_fastq_Q-score_plots.py
|
ascendo/REcount
|
6a4e3f630a0d87d709fa99bc8808506d75317c64
|
[
"MIT"
] | 1
|
2019-11-11T18:43:38.000Z
|
2019-11-11T18:43:38.000Z
|
Q-score_Analysis/REcount_split_fastq_Q-score_plots.py
|
ascendo/REcount
|
6a4e3f630a0d87d709fa99bc8808506d75317c64
|
[
"MIT"
] | null | null | null |
Q-score_Analysis/REcount_split_fastq_Q-score_plots.py
|
ascendo/REcount
|
6a4e3f630a0d87d709fa99bc8808506d75317c64
|
[
"MIT"
] | 3
|
2018-08-10T19:41:27.000Z
|
2019-11-12T16:16:09.000Z
|
from Bio import SeqIO
import regex
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
import gzip
#Input args
#filename = sys.argv[1] #Input fastq file
#Ref_filename = sys.argv[2] #Barcode reference file
filename = "<PATH_TO_FASTQ_FILE>"
Ref_filename = "<PATH_TO_UMGC_423_Variable.fasta>"
folder = "<OUTPUT_DIRECTORY>"
instrument = "<INSTRUMENT>"
if not os.path.exists(folder):
os.makedirs(folder)
#Count number of records in the file
count = 0
for record in SeqIO.parse(filename, "fastq"):
count += 1
print("There were " + str(count) + " records in file " + filename)
total_recs = count
#Count number of records in the file
count = 0
for record in SeqIO.parse(Ref_filename, "fasta"):
count += 1
Ref_rec = count
print("There were " + str(Ref_rec) + " records in the reference database")
#Count up barcodes in fastq file
bc_ID_list = [] #Construct name
bc_seq_list = [] #Barcode sequence
count_list = [] #Barcode counts
bc_all_list = []
qual_list = []
for record in SeqIO.parse(Ref_filename, "fasta"):
count = 0
bc_sub_list = []
temp_fastq_list = []
R_temp = []
bc_ID = record.id #Collect standard IDs from reference file
bc_seq = str(record.seq) #Collect standard barcode sequences from reference file
for i in SeqIO.parse(filename, "fastq"):
query = r'(?:' + bc_seq +'){s<=2}' #fuzzy matching - allow up to 2 mismatches
test = regex.findall(query, str(i.seq[:20]))
if test != []: #Comment for exact matching
#if str(i.seq[:20]) == bc_seq: #Uncomment for exact matching
count += 1
#R_temp.append(i.letter_annotations["phred_quality"])
bc_sub_list.append(str(i.seq[:20]))
temp_fastq_list.append(i)
out_file_name = folder + bc_ID + ".fastq"
SeqIO.write(temp_fastq_list, out_file_name, "fastq")
#qual_list.append(R_temp)
count_list.append(count)
bc_all_list.append(bc_sub_list)
bc_ID_list.append(bc_ID)
bc_seq_list.append(bc_seq)
print("done with " + bc_ID)
os.chdir(folder)
file_names = os.listdir(folder)
data_file_names = []
for i in file_names:
if i[-6:] ==".fastq":
data_file_names.append(i)
for i in data_file_names:
R1 = i[:-6] + "_trimmed.fastq"
execute = "cutadapt -l 50 " + i + " > " + R1
os.system(execute)
file_names = os.listdir(folder)
data_file_names = []
for i in file_names:
if i[-13:] =="trimmed.fastq":
data_file_names.append(i)
data_file_names.sort()
#Extract q-score and read number information from each sample
#Make data lists
out_dir = folder
full_name = []
fname = []
read_num = []
n_reads = []
Q_mean_by_base = []
Q_stdev_by_base = []
Q_mean_overall = []
Q_stdev_overall = []
Q_all = []
for i, item in enumerate(data_file_names):
R_temp = []
counts = 0
for j,record in enumerate(SeqIO.parse(item, "fastq")):
R_temp.append(record.letter_annotations["phred_quality"])
counts += 1
full_name.append(item)
fname.append(item)
read_num.append('1')
n_reads.append(counts)
a = np.array(R_temp)
Q_all.append(a)
Q_mean_bb = np.mean(a, axis=0)
Q_mean_by_base.append(Q_mean_bb)
Q_stdev_bb = np.std(a, axis=0)
Q_stdev_by_base.append(Q_stdev_bb)
Q_mean_o = np.mean(Q_mean_bb)
Q_mean_overall.append(Q_mean_o)
Q_stdev_o = np.std(Q_stdev_bb)
Q_stdev_overall.append(Q_stdev_o)
#print "done with %s" % item
#Make separate lists for R1 and R2
R1_full_name = []
R1_fname = []
R1_read_num = []
R1_n_reads = []
R1_Q_mean_by_base = []
R1_Q_stdev_by_base = []
R1_Q_mean_overall = []
R1_Q_stdev_overall = []
R2_full_name = []
R2_fname = []
R2_read_num = []
R2_n_reads = []
R2_Q_mean_by_base = []
R2_Q_stdev_by_base = []
R2_Q_mean_overall = []
R2_Q_stdev_overall = []
for i, item in enumerate(read_num):
if item == '1':
R1_full_name.append(full_name[i])
R1_fname.append(fname[i][:-14])
R1_read_num.append(read_num[i])
R1_n_reads.append(n_reads[i])
R1_Q_mean_by_base.append(Q_mean_by_base[i])
R1_Q_stdev_by_base.append(Q_stdev_by_base[i])
R1_Q_mean_overall.append(Q_mean_overall[i])
R1_Q_stdev_overall.append(Q_stdev_overall[i])
elif item == '2':
R2_full_name.append(full_name[i])
R2_fname.append(fname[i])
R2_read_num.append(read_num[i])
R2_n_reads.append(n_reads[i])
R2_Q_mean_by_base.append(Q_mean_by_base[i])
R2_Q_stdev_by_base.append(Q_stdev_by_base[i])
R2_Q_mean_overall.append(Q_mean_overall[i])
R2_Q_stdev_overall.append(Q_stdev_overall[i])
os.chdir(out_dir)
#Plot q-scores by sample
figure_width = (len(R1_fname)/48)*6
if figure_width<12:
figure_width=12
fig = plt.figure(figsize=(figure_width,8))
#plt.style.use('classic')
ax = fig.add_subplot(111)
#plt.title('Average Q-score by sample')
ax.set_ylim(0, 45)
ax.set_ylabel('Mean Q-score')
x = range(len(R1_fname))
x2 = [y+0.5 for y in x]
x3 = [y+0.25 for y in x]
ax.errorbar(x,R1_Q_mean_overall,yerr=[R1_Q_stdev_overall,R1_Q_stdev_overall], fmt='o', color='black', ecolor='lightgray', elinewidth=3, capsize=0, label = "Read 1")
plt.xticks(x, R1_fname, rotation='vertical', fontsize=12)
if len(R2_full_name) != 0:
ax.errorbar(x2,R2_Q_mean_overall,yerr=[R2_Q_stdev_overall,R2_Q_stdev_overall], fmt='o', color='red', ecolor='lightgray', elinewidth=3, capsize=0, label = "Read 2")
plt.xticks(x3, R1_fname, rotation='vertical', fontsize=12)
#plt.margins(figure_width/24000.0)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
#plt.subplots_adjust(bottom=0.5)
#plt.show()
plt.tight_layout()
#plt.legend(numpoints=1, frameon=False, loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('Q_score_by_sample', bbox_inches='tight', format='png')
#Box plot
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_ylabel('Q-score')
ax.boxplot(Q_all)
xtickNames = plt.setp(ax, xticklabels=R1_fname)
locs, labels = plt.xticks()
plt.setp(labels, rotation=90, fontsize=8)
plt.gcf().subplots_adjust(bottom=0.26)
#plt.show()
plt.savefig('Q_score_Boxplot', format='png')
#Plot read numbers by sample
figure_width = (len(R1_fname)/48)*6
if figure_width<12:
figure_width=12
fig = plt.figure(figsize=(figure_width,8))#plt.style.use('classic')
ax = fig.add_subplot(111)
#plt.title('Average Q-score by sample')
ax.set_ylabel('Number of reads')
x = range(len(R1_fname))
x2 = [y+0.5 for y in x]
ax.bar(x,R1_n_reads, color='black')
plt.xticks(x2, R1_fname, rotation='vertical', fontsize=8)
#plt.margins(0.05,0)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
#plt.show()
plt.tight_layout()
plt.savefig('Read_number_by_sample', bbox_inches='tight', format='png')
#Q-score heatmap
import seaborn as sns
# build the figure instance with the desired height
# Two subplots, unpack the axes array immediately
if len(R2_full_name) != 0:
sns.set_style("whitegrid")
grid_kws = {"height_ratios": (.9, .05), "hspace": .3}
figure_height = (len(R1_fname)/48)*6
if figure_height<12:
figure_height=12
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True,gridspec_kw=grid_kws, figsize=(12,figure_height))
ax1 = sns.heatmap(R1_Q_mean_by_base, ax=ax1, cmap="RdYlGn",cbar=True, vmin=0)
ax2 = sns.heatmap(R2_Q_mean_by_base, ax=ax2, cmap="RdYlGn",cbar=True, vmin=0)
ax1.set_xlabel('Position (read 1)')
ax1.set_ylabel('Samples')
names = R1_fname[::-1]
ax1.set_yticklabels(names, fontsize=6, rotation="horizontal")
ax1.axes.xaxis.set_ticklabels([])
#ax1.axes.yaxis.set_ticklabels([])
ax2.set_xlabel('Position (read 2)')
ax2.axes.xaxis.set_ticklabels([])
    # let seaborn do its thing
#ax = sns.heatmap(R1_Q_mean_by_base, ax=ax, cmap="RdYlGn")
#sns.heatmap(R1_Q_mean_by_base)
plt.savefig('Qscore_heatmap', bbox_inches='tight', format='png', dpi=300)
else:
sns.set_style("whitegrid")
grid_kws = {"height_ratios": (.9, .05), "hspace": .3}
fig, ax1 = plt.subplots(1, 1, sharey=True)#,gridspec_kw=grid_kws)
ax1 = sns.heatmap(R1_Q_mean_by_base, ax=ax1, cmap="RdYlGn",cbar=True)#, vmin=0)
ax1.set_xlabel('Position (read 1)')
ax1.set_ylabel('Samples')
names = R1_fname[::-1]
ax1.set_yticklabels(names, fontsize=6, rotation="horizontal")
ax1.axes.xaxis.set_ticklabels([])
#ax1.axes.yaxis.set_ticklabels([])
    # let seaborn do its thing
#ax = sns.heatmap(R1_Q_mean_by_base, ax=ax, cmap="RdYlGn")
#sns.heatmap(R1_Q_mean_by_base)
plt.savefig('Qscore_heatmap', bbox_inches='tight', format='png', dpi=300)
save_name = (instrument + "_indiv_Q_score_data.txt")
save_file = open(save_name, "w")
newtab = '\t'
newline = '\n'
save_file.write(instrument)
save_file.write(newtab)
for i in R1_fname:
save_file.write(i)
save_file.write(newtab)
save_file.write(newline)
save_file.write("Mean Q score")
save_file.write(newtab)
for i in R1_Q_mean_overall:
save_file.write(str(i))
save_file.write(newtab)
save_file.write(newline)
save_file.write("Standard Deviation Q score")
save_file.write(newtab)
for i in R1_Q_stdev_overall:
save_file.write(str(i))
save_file.write(newtab)
save_file.write(newline)
save_file.write("Read count")
save_file.write(newtab)
for i in R1_n_reads:
save_file.write(str(i))
save_file.write(newtab)
save_file.close()
#Sum all standards for a given size
size_bins = []
for i in data_file_names:
temp = i.split("_")[3]
size_bins.append(temp)
unique_sizes = []
for i in size_bins:
if i not in unique_sizes:
unique_sizes.append(i)
#concatenate same size files
for i in unique_sizes:
temp_concat = []
size_search = "_" + i + "_"
for j in data_file_names:
if j.find(size_search) != -1:
temp_concat.append(j)
execute = "cat " + temp_concat[0] + " " + temp_concat[1] + " " + temp_concat[2] + " > " + i + "_concat.fastq"
os.system(execute)
file_names = os.listdir(folder)
concat_data_files = []
for i in file_names:
if i[-12:] == 'concat.fastq':
concat_data_files.append(i)
concat_files_sorted = []
for i in unique_sizes:
for j in concat_data_files:
if j.split("_")[0] == i:
concat_files_sorted.append(j)
#Make data lists
out_dir = folder
full_name = []
fname = []
read_num = []
n_reads = []
Q_mean_by_base = []
Q_stdev_by_base = []
Q_mean_overall = []
Q_stdev_overall = []
Q_all = []
for i, item in enumerate(concat_files_sorted):
R_temp = []
counts = 0
for j,record in enumerate(SeqIO.parse(item, "fastq")):
R_temp.append(record.letter_annotations["phred_quality"])
counts += 1
full_name.append(item)
fname.append(item)
read_num.append('1')
n_reads.append(counts)
a = np.array(R_temp)
Q_all.append(a)
Q_mean_bb = np.mean(a, axis=0)
Q_mean_by_base.append(Q_mean_bb)
Q_stdev_bb = np.std(a, axis=0)
Q_stdev_by_base.append(Q_stdev_bb)
Q_mean_o = np.mean(Q_mean_bb)
Q_mean_overall.append(Q_mean_o)
Q_stdev_o = np.std(Q_stdev_bb)
Q_stdev_overall.append(Q_stdev_o)
#print "done with %s" % item
#Make separate lists for R1 and R2
R1_full_name = []
R1_fname = []
R1_read_num = []
R1_n_reads = []
R1_Q_mean_by_base = []
R1_Q_stdev_by_base = []
R1_Q_mean_overall = []
R1_Q_stdev_overall = []
R2_full_name = []
R2_fname = []
R2_read_num = []
R2_n_reads = []
R2_Q_mean_by_base = []
R2_Q_stdev_by_base = []
R2_Q_mean_overall = []
R2_Q_stdev_overall = []
for i, item in enumerate(read_num):
if item == '1':
R1_full_name.append(full_name[i])
R1_fname.append(fname[i][:-14])
R1_read_num.append(read_num[i])
R1_n_reads.append(n_reads[i])
R1_Q_mean_by_base.append(Q_mean_by_base[i])
R1_Q_stdev_by_base.append(Q_stdev_by_base[i])
R1_Q_mean_overall.append(Q_mean_overall[i])
R1_Q_stdev_overall.append(Q_stdev_overall[i])
elif item == '2':
R2_full_name.append(full_name[i])
R2_fname.append(fname[i])
R2_read_num.append(read_num[i])
R2_n_reads.append(n_reads[i])
R2_Q_mean_by_base.append(Q_mean_by_base[i])
R2_Q_stdev_by_base.append(Q_stdev_by_base[i])
R2_Q_mean_overall.append(Q_mean_overall[i])
R2_Q_stdev_overall.append(Q_stdev_overall[i])
os.chdir(out_dir)
#Plot q-scores by sample
figure_width = (len(unique_sizes)/48)*6
if figure_width<12:
figure_width=12
fig = plt.figure(figsize=(figure_width,8))
#plt.style.use('classic')
ax = fig.add_subplot(111)
#plt.title('Average Q-score by sample')
ax.set_ylim(25, 40)
ax.set_ylabel('Mean Q-score')
x = range(len(unique_sizes))
x2 = [y+0.5 for y in x]
x3 = [y+0.25 for y in x]
ax.errorbar(x,R1_Q_mean_overall,yerr=[R1_Q_stdev_overall,R1_Q_stdev_overall], fmt='o', color='black', ecolor='lightgray', elinewidth=3, capsize=0, label = "Read 1")
plt.xticks(x, unique_sizes, rotation='vertical', fontsize=12)
if len(R2_full_name) != 0:
ax.errorbar(x2,R2_Q_mean_overall,yerr=[R2_Q_stdev_overall,R2_Q_stdev_overall], fmt='o', color='red', ecolor='lightgray', elinewidth=3, capsize=0, label = "Read 2")
plt.xticks(x3, unique_sizes, rotation='vertical', fontsize=12)
#plt.margins(figure_width/24000.0)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
#plt.subplots_adjust(bottom=0.5)
#plt.show()
plt.tight_layout()
#plt.legend(numpoints=1, frameon=False, loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('Q_score_by_sample_grouped', bbox_inches='tight', format='png')
#Box plot
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_ylabel('Q-score')
ax.boxplot(Q_all)
xtickNames = plt.setp(ax, xticklabels=unique_sizes)
locs, labels = plt.xticks()
plt.setp(labels, rotation=90, fontsize=8)
plt.gcf().subplots_adjust(bottom=0.26)
#plt.show()
plt.savefig('Q_score_Boxplot_grouped', format='png')
#Plot read numbers by sample
figure_width = (len(unique_sizes)/48)*6
if figure_width<12:
figure_width=12
fig = plt.figure(figsize=(figure_width,8))#plt.style.use('classic')
ax = fig.add_subplot(111)
#plt.title('Average Q-score by sample')
ax.set_ylabel('Number of reads')
x = range(len(unique_sizes))
x2 = [y+0.5 for y in x]
ax.bar(x,R1_n_reads, color='black')
plt.xticks(x2, unique_sizes, rotation='vertical', fontsize=8)
#plt.margins(0.05,0)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
#plt.show()
plt.tight_layout()
plt.savefig('Read_number_by_sample_grouped', bbox_inches='tight', format='png')
#Q-score heatmap
import seaborn as sns
# build the figure instance with the desired height
# Two subplots, unpack the axes array immediately
if len(R2_full_name) != 0:
sns.set_style("whitegrid")
grid_kws = {"height_ratios": (.9, .05), "hspace": .3}
figure_height = (len(unique_sizes)/48)*6
if figure_height<12:
figure_height=12
fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True,gridspec_kw=grid_kws, figsize=(12,figure_height))
ax1 = sns.heatmap(R1_Q_mean_by_base, ax=ax1, cmap="RdYlGn",cbar=True, vmin=0)
ax2 = sns.heatmap(R2_Q_mean_by_base, ax=ax2, cmap="RdYlGn",cbar=True, vmin=0)
ax1.set_xlabel('Position (read 1)')
ax1.set_ylabel('Samples')
names = unique_sizes[::-1]
ax1.set_yticklabels(names, fontsize=6, rotation="horizontal")
ax1.axes.xaxis.set_ticklabels([])
#ax1.axes.yaxis.set_ticklabels([])
ax2.set_xlabel('Position (read 2)')
ax2.axes.xaxis.set_ticklabels([])
    # let seaborn do its thing
#ax = sns.heatmap(R1_Q_mean_by_base, ax=ax, cmap="RdYlGn")
#sns.heatmap(R1_Q_mean_by_base)
    # use the _grouped suffix so the per-sample heatmap saved earlier is not
    # overwritten (the single-read branch below already does this).
    plt.savefig('Qscore_heatmap_grouped', bbox_inches='tight', format='png', dpi=300)
else:
sns.set_style("whitegrid")
grid_kws = {"height_ratios": (.9, .05), "hspace": .3}
fig, ax1 = plt.subplots(1, 1, sharey=True)#,gridspec_kw=grid_kws)
ax1 = sns.heatmap(R1_Q_mean_by_base, ax=ax1, cmap="RdYlGn",cbar=True)#, vmin=0)
ax1.set_xlabel('Position (read 1)')
ax1.set_ylabel('Samples')
names = unique_sizes[::-1]
ax1.set_yticklabels(names, fontsize=6, rotation="horizontal")
ax1.axes.xaxis.set_ticklabels([])
#ax1.axes.yaxis.set_ticklabels([])
    # let seaborn do its thing
#ax = sns.heatmap(R1_Q_mean_by_base, ax=ax, cmap="RdYlGn")
#sns.heatmap(R1_Q_mean_by_base)
plt.savefig('Qscore_heatmap_grouped', bbox_inches='tight', format='png', dpi=300)
save_name = (instrument + "_grouped_Q_score_data.txt")
save_file = open(save_name, "w")
newtab = '\t'
newline = '\n'
save_file.write(instrument)
save_file.write(newtab)
for i in unique_sizes:
save_file.write(i)
save_file.write(newtab)
save_file.write(newline)
save_file.write("Mean Q score")
save_file.write(newtab)
for i in R1_Q_mean_overall:
save_file.write(str(i))
save_file.write(newtab)
save_file.write(newline)
save_file.write("Standard Deviation Q score")
save_file.write(newtab)
for i in R1_Q_stdev_overall:
save_file.write(str(i))
save_file.write(newtab)
save_file.write(newline)
save_file.write("Read count")
save_file.write(newtab)
for i in R1_n_reads:
save_file.write(str(i))
save_file.write(newtab)
save_file.close()
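# Editorial aside (not part of the original script): the per-base reductions
# above collapse a (reads x positions) matrix of phred scores with numpy.
# The same reduction on a tiny literal matrix:
_toy_quals = np.array([[30, 32, 28], [34, 30, 26], [32, 34, 30]])
print("toy mean Q by base:", np.mean(_toy_quals, axis=0))  # [32. 32. 28.]
print("toy stdev by base:", np.std(_toy_quals, axis=0))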
| 32.391791
| 167
| 0.698019
| 2,904
| 17,362
| 3.897383
| 0.103994
| 0.02739
| 0.043647
| 0.029157
| 0.847411
| 0.838487
| 0.826913
| 0.823732
| 0.817547
| 0.80827
| 0
| 0.029975
| 0.154533
| 17,362
| 535
| 168
| 32.452336
| 0.741059
| 0.140825
| 0
| 0.79021
| 0
| 0
| 0.091136
| 0.013549
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.020979
| 0
| 0.020979
| 0.006993
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
75dd93f5dfa7dabb35bba6269bbdabfd79cb2ce3
| 2,925
|
py
|
Python
|
BCmetric/utilNotAngle.py
|
visdata/UrbanMotionAnalysis
|
423357bb3d8369e174386174aa6209e32473836c
|
[
"Apache-2.0"
] | null | null | null |
BCmetric/utilNotAngle.py
|
visdata/UrbanMotionAnalysis
|
423357bb3d8369e174386174aa6209e32473836c
|
[
"Apache-2.0"
] | null | null | null |
BCmetric/utilNotAngle.py
|
visdata/UrbanMotionAnalysis
|
423357bb3d8369e174386174aa6209e32473836c
|
[
"Apache-2.0"
] | 1
|
2020-04-02T13:16:19.000Z
|
2020-04-02T13:16:19.000Z
|
from math import sqrt
def averageDirection(angleArray, n):
    return sum(angleArray)/n
def angleDistance(angle):
    return angle
def std(angleArray, n, averageDir):
    # population standard deviation (divides by n, not n-1)
    sumValue = sum([pow(angleDistance(angle - averageDir), 2) for angle in angleArray])
    return sqrt(float(sumValue)/n)
def kurtosis(angleArray, n, averageDir, std):
    sumValue = sum([float(pow(angleDistance(angle - averageDir)/std, 4)) for angle in angleArray])
    return sumValue/n
def skewness(angleArray, n, averageDir, std):
    sumValue = sum([float(pow(angleDistance(angle - averageDir)/std, 3)) for angle in angleArray])
    return sumValue/n
def BCMetric(kurtosisValue, skewnessValue, n):
    # sample-size-corrected bimodality coefficient
    return (pow(skewnessValue,2) + 1)/(kurtosisValue-3+float(3*pow(n-1,2))/((n-2)*(n-3)))
def BCCal(angleArray):
    arrLen = len(angleArray)
    averageDir = averageDirection(angleArray, arrLen)
    stdValue = std(angleArray, arrLen, averageDir)
    kurtosisValue = kurtosis(angleArray, arrLen, averageDir, stdValue)
    skewnessValue = skewness(angleArray, arrLen, averageDir, stdValue)
    print(averageDir, stdValue, kurtosisValue, skewnessValue)
    return BCMetric(kurtosisValue, skewnessValue, arrLen)
#anglearr = [0,0,0,0,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,0,0,0,0,0,0,0,0,0,0,0,0]
#anglearr = [[91.0, 1], [271.0, 1], [270.0, 1], [225.0, 1], [91.0, 1], [90.0, 1], [91.0, 1], [90.0, 1], [206.0, 1], [273.0, 1], [270.0, 1], [255.0, 1], [270.0, 1], [269.0, 1], [91.0, 1], [90.0, 1], [271.0, 1], [270.0, 1], [270.0, 1], [91.0, 1], [90.0, 1], [86.0, 1], [91.0, 1], [92.0, 1], [86.0, 1], [90.0, 1], [91.0, 1], [88.0, 1], [90.0, 1], [270.0, 1], [271.0, 1], [265.0, 1], [83.0, 1], [91.0, 1], [24.0, 1], [90.0, 1], [180.0, 1], [271.0, 1], [270.0, 1], [270.0, 1], [271.0, 1], [270.0, 1], [72.0, 1], [248.0, 1], [271.0, 1], [270.0, 1], [78.0, 1], [91.0, 1], [39.0, 1], [91.0, 1], [270.0, 1], [88.0, 1], [92.0, 1], [89.0, 1], [90.0, 1], [90.0, 1]]
anglearr = [[288.0, 1], [102.0, 1], [95.0, 1], [251.0, 1], [259.0, 1], [355.0, 1], [256.0, 1], [259.0, 1], [89.0, 1], [106.0, 1], [104.0, 1], [242.0, 1], [275.0, 1], [274.0, 1], [89.0, 1], [92.0, 1], [270.0, 1], [254.0, 1], [96.0, 1], [86.0, 1], [277.0, 1], [259.0, 1], [92.0, 1], [273.0, 1], [90.0, 1], [91.0, 1], [29.0, 1], [288.0, 1], [95.0, 1], [80.0, 1], [272.0, 1], [87.0, 1], [355.0, 1], [282.0, 1], [77.0, 1], [82.0, 1], [95.0, 1], [80.0, 1], [275.0, 1], [283.0, 1], [275.0, 1], [79.0, 1], [90.0, 1], [286.0, 1], [272.0, 1], [81.0, 1], [82.0, 1], [94.0, 1], [273.0, 1], [112.0, 1], [86.0, 1]]
anglearr = [elem[0] for elem in anglearr]
print(anglearr)
print(BCCal(anglearr))
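# Editorial note (not part of the original script): a common benchmark for
# the bimodality coefficient is 5/9 (~0.555), its value for a uniform
# distribution; larger scores are usually read as evidence of bimodality.
# Sanity check on a clearly two-peaked toy sample:
_toy_bimodal = [0.0]*50 + [180.0]*50
assert BCCal(_toy_bimodal) > 5.0/9.0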
| 68.023256
| 653
| 0.588034
| 583
| 2,925
| 2.950257
| 0.157804
| 0.124419
| 0.42907
| 0.565116
| 0.515698
| 0.373256
| 0.373256
| 0.373256
| 0.259302
| 0.259302
| 0
| 0.302621
| 0.139145
| 2,925
| 43
| 654
| 68.023256
| 0.380461
| 0.352821
| 0
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.241379
| false
| 0
| 0.068966
| 0.103448
| 0.551724
| 0.103448
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
75e1470c92d0b818265b0e56ade9dc9637d62d7c
| 113
|
py
|
Python
|
gsfpy/enums.py
|
irewolepeter/gsfpy_USM_Implementation
|
c4614ac3f7d833eb86ea38c7708108b130f96612
|
[
"MIT"
] | 7
|
2020-07-01T07:12:19.000Z
|
2022-01-20T20:39:57.000Z
|
gsfpy/enums.py
|
irewolepeter/gsfpy_USM_Implementation
|
c4614ac3f7d833eb86ea38c7708108b130f96612
|
[
"MIT"
] | 36
|
2020-06-23T09:10:15.000Z
|
2022-03-22T10:27:58.000Z
|
gsfpy/enums.py
|
irewolepeter/gsfpy_USM_Implementation
|
c4614ac3f7d833eb86ea38c7708108b130f96612
|
[
"MIT"
] | 2
|
2021-02-07T13:21:52.000Z
|
2021-06-24T19:16:16.000Z
|
from gsfpy import mirror_default_gsf_version_submodule
mirror_default_gsf_version_submodule(globals(), "enums")
| 28.25
| 56
| 0.876106
| 15
| 113
| 6.066667
| 0.666667
| 0.285714
| 0.351648
| 0.505495
| 0.703297
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.061947
| 113
| 3
| 57
| 37.666667
| 0.858491
| 0
| 0
| 0
| 0
| 0
| 0.044248
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
75fe8619af87086bc0b697003e23ac509eb6915c
| 139
|
py
|
Python
|
strings/tests/test_jewels_and_stones.py
|
ahcode0919/python-ds-algorithms
|
0d617b78c50b6c18da40d9fa101438749bfc82e1
|
[
"MIT"
] | null | null | null |
strings/tests/test_jewels_and_stones.py
|
ahcode0919/python-ds-algorithms
|
0d617b78c50b6c18da40d9fa101438749bfc82e1
|
[
"MIT"
] | null | null | null |
strings/tests/test_jewels_and_stones.py
|
ahcode0919/python-ds-algorithms
|
0d617b78c50b6c18da40d9fa101438749bfc82e1
|
[
"MIT"
] | 3
|
2020-10-07T20:24:45.000Z
|
2020-12-16T04:53:19.000Z
|
from strings.jewels_and_stones import jewels_and_stones
def test_jewels_and_stones():
assert jewels_and_stones("aA", "aAAbbbb") == 3
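# A hedged sketch (editorial addition) of one common implementation that this
# test could be exercising: count stones that are also jewels, using a set
# for O(1) membership checks. The real strings.jewels_and_stones module may
# be written differently.
def _jewels_and_stones_sketch(jewels: str, stones: str) -> int:
    jewel_set = set(jewels)
    return sum(1 for stone in stones if stone in jewel_set)
assert _jewels_and_stones_sketch("aA", "aAAbbbb") == 3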
| 23.166667
| 55
| 0.784173
| 21
| 139
| 4.761905
| 0.571429
| 0.36
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008197
| 0.122302
| 139
| 5
| 56
| 27.8
| 0.811475
| 0
| 0
| 0
| 0
| 0
| 0.064748
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
f95100ad6808c467455a98ab6c27945201ba8b6c
| 5,628
|
py
|
Python
|
post/migrations/0001_initial.py
|
amitdhiman000/dais
|
dd51e20bc19cade7009253f29cf2f63ae2fe3abc
|
[
"Apache-2.0"
] | null | null | null |
post/migrations/0001_initial.py
|
amitdhiman000/dais
|
dd51e20bc19cade7009253f29cf2f63ae2fe3abc
|
[
"Apache-2.0"
] | null | null | null |
post/migrations/0001_initial.py
|
amitdhiman000/dais
|
dd51e20bc19cade7009253f29cf2f63ae2fe3abc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-15 08:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('user', '0002_auto_20161215_0806'),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('edited_date', models.DateTimeField(default=django.utils.timezone.now)),
('approved', models.BooleanField(default=False)),
('title', models.CharField(max_length=100)),
('sub_title', models.CharField(max_length=200)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.User')),
],
options={
'abstract': False,
'verbose_name': 'post',
'verbose_name_plural': 'posts',
},
),
migrations.CreateModel(
name='ArticleComment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('edited_date', models.DateTimeField(default=django.utils.timezone.now)),
('approved', models.BooleanField(default=False)),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='post.Article')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.User')),
],
options={
'abstract': False,
'verbose_name': 'post',
'verbose_name_plural': 'posts',
},
),
migrations.CreateModel(
name='ArticleReaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('reaction', models.IntegerField(default=1)),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='post.Article')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.User')),
],
),
migrations.CreateModel(
name='CommentReaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('reaction', models.IntegerField(default=1)),
('comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='post.ArticleComment')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.User')),
],
),
migrations.CreateModel(
name='ReplyComment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('edited_date', models.DateTimeField(default=django.utils.timezone.now)),
('approved', models.BooleanField(default=False)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.User')),
('comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='post.ReplyComment')),
],
options={
'abstract': False,
'verbose_name': 'post',
'verbose_name_plural': 'posts',
},
),
migrations.CreateModel(
name='ReplyCommentReaction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('reaction', models.IntegerField(default=1)),
('comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='post.ReplyComment')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.User')),
],
),
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('topic_name', models.CharField(max_length=50)),
('topic_desc', models.CharField(blank=True, max_length=100)),
('topic_followers', models.IntegerField(default=0)),
('topic_author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.User')),
],
),
migrations.CreateModel(
name='TopicFollower',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('topic', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='post.Topic')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='user.User')),
],
),
]
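# A hedged reconstruction (editorial addition) of the models.py that could
# produce the Article CreateModel above. Fields are read directly off the
# migration; the project's real source file may differ, and this only
# imports inside a configured Django project.
from django.db import models
from django.utils import timezone
class Article(models.Model):
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    edited_date = models.DateTimeField(default=timezone.now)
    approved = models.BooleanField(default=False)
    title = models.CharField(max_length=100)
    sub_title = models.CharField(max_length=200)
    author = models.ForeignKey('user.User', on_delete=models.CASCADE)
    class Meta:
        verbose_name = 'post'
        verbose_name_plural = 'posts'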
| 48.102564
| 118
| 0.575338
| 541
| 5,628
| 5.855823
| 0.166359
| 0.040404
| 0.066288
| 0.104167
| 0.833649
| 0.815341
| 0.815341
| 0.815341
| 0.815341
| 0.815341
| 0
| 0.011817
| 0.278252
| 5,628
| 116
| 119
| 48.517241
| 0.768095
| 0.012082
| 0
| 0.722222
| 1
| 0
| 0.130826
| 0.004139
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.074074
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f96e7a8752112cf6f4705a65acb0e09cfc8f98ed
| 3,339
|
py
|
Python
|
swahiliapiapp/migrations/0001_initial.py
|
florianschmidt1994/swahili-dictionary
|
301e99a2e1f169ffcc1038a77ecd6658bc4ab864
|
[
"Unlicense"
] | null | null | null |
swahiliapiapp/migrations/0001_initial.py
|
florianschmidt1994/swahili-dictionary
|
301e99a2e1f169ffcc1038a77ecd6658bc4ab864
|
[
"Unlicense"
] | null | null | null |
swahiliapiapp/migrations/0001_initial.py
|
florianschmidt1994/swahili-dictionary
|
301e99a2e1f169ffcc1038a77ecd6658bc4ab864
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 3.0.3 on 2020-03-07 22:28
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='English',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('english_definition', models.TextField(blank=True, null=True)),
('note', models.TextField(blank=True, null=True)),
('english_example', models.TextField(blank=True, null=True)),
('swahili_plural', models.TextField(blank=True, null=True)),
('swahili_definition', models.TextField(blank=True, null=True)),
('english_word', models.TextField(blank=True, null=True)),
('english_plural', models.TextField(blank=True, null=True)),
('terminology', models.TextField(blank=True, null=True)),
('part_of_speech', models.TextField(blank=True, null=True)),
('dialect', models.TextField(blank=True, null=True)),
('swahili_word', models.TextField(blank=True, null=True)),
('related_words', models.TextField(blank=True, null=True)),
('taxonomy', models.TextField(blank=True, null=True)),
('derived_word', models.TextField(blank=True, null=True)),
('swahili_example', models.TextField(blank=True, null=True)),
('derived_language', models.TextField(blank=True, null=True)),
('class_field', models.TextField(blank=True, db_column='class', null=True)),
],
options={
'db_table': 'english',
},
),
migrations.CreateModel(
name='Swahili',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('english_definition', models.TextField(blank=True, null=True)),
('note', models.TextField(blank=True, null=True)),
('english_example', models.TextField(blank=True, null=True)),
('swahili_plural', models.TextField(blank=True, null=True)),
('swahili_definition', models.TextField(blank=True, null=True)),
('english_word', models.TextField(blank=True, null=True)),
('english_plural', models.TextField(blank=True, null=True)),
('terminology', models.TextField(blank=True, null=True)),
('part_of_speech', models.TextField(blank=True, null=True)),
('dialect', models.TextField(blank=True, null=True)),
('swahili_word', models.TextField(blank=True, null=True)),
('related_words', models.TextField(blank=True, null=True)),
('taxonomy', models.TextField(blank=True, null=True)),
('derived_word', models.TextField(blank=True, null=True)),
('swahili_example', models.TextField(blank=True, null=True)),
('derived_language', models.TextField(blank=True, null=True)),
('class_field', models.TextField(blank=True, db_column='class', null=True)),
],
options={
'db_table': 'swahili',
},
),
]
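# Editorial aside (not part of the original migration): English and Swahili
# share an identical column list. A hedged sketch of how that duplication is
# often factored out at the model layer with an abstract base class (an
# alternative design, not necessarily what this project does):
from django.db import models
class DictionaryEntryBase(models.Model):
    english_word = models.TextField(blank=True, null=True)
    swahili_word = models.TextField(blank=True, null=True)
    part_of_speech = models.TextField(blank=True, null=True)
    # ...the remaining shared columns from the migration would go here...
    class Meta:
        abstract = True  # Django creates no table for an abstract base
class English(DictionaryEntryBase):
    class Meta:
        db_table = 'english'
class Swahili(DictionaryEntryBase):
    class Meta:
        db_table = 'swahili'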
| 49.835821
| 92
| 0.567835
| 332
| 3,339
| 5.608434
| 0.165663
| 0.273899
| 0.365199
| 0.438238
| 0.886144
| 0.886144
| 0.886144
| 0.886144
| 0.886144
| 0.886144
| 0
| 0.006253
| 0.281521
| 3,339
| 66
| 93
| 50.590909
| 0.769904
| 0.013477
| 0
| 0.779661
| 1
| 0
| 0.147631
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.016949
| 0
| 0.084746
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
f97164d333ac838a3a37526d71b841e4cc91a79b
| 2,179
|
py
|
Python
|
metaroot/mqutils.py
|
cwru-rcci/metaroot
|
24fc0dcce65046bf2ef848edc39041646a00de77
|
[
"MIT"
] | null | null | null |
metaroot/mqutils.py
|
cwru-rcci/metaroot
|
24fc0dcce65046bf2ef848edc39041646a00de77
|
[
"MIT"
] | null | null | null |
metaroot/mqutils.py
|
cwru-rcci/metaroot
|
24fc0dcce65046bf2ef848edc39041646a00de77
|
[
"MIT"
] | 1
|
2022-03-18T17:14:53.000Z
|
2022-03-18T17:14:53.000Z
|
import pika
from metaroot.config import get_global_config
def delete_queue(queue_name: str):
"""
Deletes a queue from the message queue server
Parameters
----------
queue_name: str
The name of the queue to delete
Returns
----------
int
Returns 0 on success
Raises
----------
Exception
If the underlying operations raise an exception
"""
config = get_global_config()
# Pretty standard connection stuff (user, password, etc)
credentials = pika.PlainCredentials(config.get_mq_user(), config.get_mq_pass())
parameters = pika.ConnectionParameters(host=config.get_mq_host(),
port=config.get_mq_port(),
virtual_host='/',
credentials=credentials,
heartbeat=30)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_delete(queue=queue_name)
connection.close()
return 0
def create_queue(queue_name: str):
"""
Creates a durable queue on the message queue server
Parameters
----------
queue_name: str
        The name of the queue to create
Returns
----------
int
Returns 0 on success
Raises
----------
Exception
If the underlying operations raise an exception
"""
config = get_global_config()
# Pretty standard connection stuff
credentials = pika.PlainCredentials(config.get_mq_user(), config.get_mq_pass())
parameters = pika.ConnectionParameters(host=config.get_mq_host(),
port=config.get_mq_port(),
virtual_host='/',
credentials=credentials,
heartbeat=30)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue_name,
durable=True) # request that the queue be persisted to disk
connection.close()
return 0
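# A hedged usage sketch (editorial addition): publishing a persistent message
# to a queue created with create_queue above. The function name and queue
# name are illustrative; connection settings come from the same global
# config helpers used by the functions above.
def publish_example(queue_name: str, body: str):
    config = get_global_config()
    credentials = pika.PlainCredentials(config.get_mq_user(), config.get_mq_pass())
    parameters = pika.ConnectionParameters(host=config.get_mq_host(),
                                           port=config.get_mq_port(),
                                           virtual_host='/',
                                           credentials=credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    channel.basic_publish(exchange='',
                          routing_key=queue_name,
                          body=body,
                          properties=pika.BasicProperties(delivery_mode=2))  # persist to disk
    connection.close()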
| 29.849315
| 86
| 0.563561
| 212
| 2,179
| 5.632075
| 0.29717
| 0.075377
| 0.073702
| 0.033501
| 0.763819
| 0.763819
| 0.763819
| 0.763819
| 0.763819
| 0.763819
| 0
| 0.00563
| 0.347866
| 2,179
| 73
| 87
| 29.849315
| 0.834624
| 0.293713
| 0
| 0.758621
| 0
| 0
| 0.001417
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0.068966
| 0.068966
| 0
| 0.206897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
ddb0613d35348187dc4b86ad30cfead4c5587b88
| 241
|
py
|
Python
|
facebook_hateful_memes_detector/models/classifiers/__init__.py
|
faizanahemad/facebook-hateful-memes
|
1f7febf65f5fc4ed4aeb476d5383437f677fbc19
|
[
"MIT"
] | 9
|
2020-07-28T20:33:04.000Z
|
2022-01-28T16:51:40.000Z
|
facebook_hateful_memes_detector/models/classifiers/__init__.py
|
faizanahemad/facebook-hateful-memes
|
1f7febf65f5fc4ed4aeb476d5383437f677fbc19
|
[
"MIT"
] | 3
|
2021-06-08T21:36:37.000Z
|
2021-09-08T02:03:07.000Z
|
facebook_hateful_memes_detector/models/classifiers/__init__.py
|
faizanahemad/facebook-hateful-memes
|
1f7febf65f5fc4ed4aeb476d5383437f677fbc19
|
[
"MIT"
] | 1
|
2020-08-26T08:13:25.000Z
|
2020-08-26T08:13:25.000Z
|
from .CNN1DFeaturizer import CNN1DFeaturizer
from .GRUFeaturizer import GRUFeaturizer
from .TransformerFeaturizer import TransformerFeaturizer, TransformerEnsembleFeaturizer
from .BaseFeaturizer import BasicFeaturizer, PassThroughFeaturizer
| 48.2
| 87
| 0.900415
| 18
| 241
| 12.055556
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008969
| 0.074689
| 241
| 4
| 88
| 60.25
| 0.964126
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
34a3a4aa7235c462c1e03384fce6698571c54525
| 11,420
|
py
|
Python
|
learning_transforms/test_factor_multiply.py
|
sfox14/butterfly
|
13cc15cee5bdb7adaf376219aaf20fab0459e9ef
|
[
"Apache-2.0"
] | 52
|
2020-08-05T08:32:24.000Z
|
2022-03-27T21:56:34.000Z
|
learning_transforms/test_factor_multiply.py
|
sfox14/butterfly
|
13cc15cee5bdb7adaf376219aaf20fab0459e9ef
|
[
"Apache-2.0"
] | 13
|
2020-09-14T23:34:32.000Z
|
2022-02-15T10:51:03.000Z
|
learning_transforms/test_factor_multiply.py
|
sfox14/butterfly
|
13cc15cee5bdb7adaf376219aaf20fab0459e9ef
|
[
"Apache-2.0"
] | 11
|
2020-10-15T07:03:25.000Z
|
2022-03-25T12:03:49.000Z
|
import unittest
import torch
from butterfly_factor import butterfly_factor_mult, butterfly_factor_mult_intermediate
from butterfly import Block2x2DiagProduct
from complex_utils import complex_mul
from factor_multiply import butterfly_multiply_intermediate, butterfly_multiply_intermediate_backward
def twiddle_list_concat(B: Block2x2DiagProduct):
# Assume ordering from largest size to smallest size
if not B.complex:
return torch.cat([factor.ABCD.permute(2, 0, 1) for factor in B.factors[::-1]])
else:
return torch.cat([factor.ABCD.permute(2, 0, 1, 3) for factor in B.factors[::-1]])
class ButterflyFactorTest(unittest.TestCase):
def setUp(self):
self.rtol = 1e-3
self.atol = 1e-5
def test_butterfly_factor_cpu(self):
batch_size = 10
n = 4096
B = Block2x2DiagProduct(n)
input_ = torch.randn(batch_size, n, requires_grad=True)
output = input_
for factor in B.factors[::-1]:
prev = output
output = butterfly_factor_mult(factor.ABCD, output.view(-1, 2, factor.size // 2)).view(prev.shape)
output_slow = ((factor.ABCD * prev.view(-1, 1, 2, factor.size // 2)).sum(dim=-2)).view(prev.shape)
self.assertTrue(torch.allclose(output, output_slow, rtol=self.rtol, atol=self.atol), (output - output_slow).abs().max().item())
grad = torch.randn_like(output)
d_twiddle, d_input = torch.autograd.grad(output, (factor.ABCD, prev), grad, retain_graph=True)
d_twiddle_slow, d_input_slow = torch.autograd.grad(output_slow, (factor.ABCD, prev), grad, retain_graph=True)
self.assertTrue(torch.allclose(d_twiddle, d_twiddle_slow, rtol=self.rtol, atol=self.atol), (d_twiddle - d_twiddle_slow).abs().max().item())
self.assertTrue(torch.allclose(d_input, d_input_slow, rtol=self.rtol, atol=self.atol), (d_input - d_input_slow).abs().max().item())
def test_butterfly_factor_complex_cpu(self):
batch_size = 10
n = 4096
B = Block2x2DiagProduct(n, complex=True)
input_ = torch.randn(batch_size, n, 2, requires_grad=True)
output = input_
for factor in B.factors[::-1]:
prev = output
output = butterfly_factor_mult(factor.ABCD, output.view(-1, 2, factor.size // 2, 2)).view(prev.shape)
output_slow = (complex_mul(factor.ABCD, prev.view(-1, 1, 2, factor.size // 2, 2)).sum(dim=-3)).view(prev.shape)
self.assertTrue(torch.allclose(output, output_slow, rtol=self.rtol, atol=self.atol), (output - output_slow).abs().max().item())
grad = torch.randn_like(output)
d_twiddle, d_input = torch.autograd.grad(output, (factor.ABCD, prev), grad, retain_graph=True)
d_twiddle_slow, d_input_slow = torch.autograd.grad(output_slow, (factor.ABCD, prev), grad, retain_graph=True)
self.assertTrue(torch.allclose(d_twiddle, d_twiddle_slow, rtol=self.rtol, atol=self.atol), (d_twiddle - d_twiddle_slow).abs().max().item())
self.assertTrue(torch.allclose(d_input, d_input_slow, rtol=self.rtol, atol=self.atol), (d_input - d_input_slow).abs().max().item())
@unittest.skipIf(not torch.cuda.is_available(), "need CUDA")
def test_butterfly_factor_cuda(self):
batch_size = 100
n = 4096 # To test n > MAX_BLOCK_SIZE
B = Block2x2DiagProduct(n).to('cuda')
input_ = torch.randn(batch_size, n, device='cuda', requires_grad=True)
output = input_
for factor in B.factors[::-1]:
prev = output
output = butterfly_factor_mult(factor.ABCD, output.view(-1, 2, factor.size // 2)).view(prev.shape)
output_slow = ((factor.ABCD * prev.view(-1, 1, 2, factor.size // 2)).sum(dim=-2)).view(prev.shape)
self.assertTrue(torch.allclose(output, output_slow, rtol=self.rtol, atol=self.atol), (output - output_slow).abs().max().item())
grad = torch.randn_like(output)
d_twiddle, d_input = torch.autograd.grad(output, (factor.ABCD, prev), grad, retain_graph=True)
d_twiddle_slow, d_input_slow = torch.autograd.grad(output_slow, (factor.ABCD, prev), grad, retain_graph=True)
self.assertTrue(torch.allclose(d_twiddle, d_twiddle_slow, rtol=self.rtol, atol=self.atol), (factor.size, (d_twiddle - d_twiddle_slow).abs().max().item()))
self.assertTrue(torch.allclose(d_input, d_input_slow, rtol=self.rtol, atol=self.atol), (d_input - d_input_slow).abs().max().item())
def test_butterfly_factor_intermediate_cpu(self):
batch_size = 10
n = 4096
B = Block2x2DiagProduct(n)
input_ = torch.randn(batch_size, n, requires_grad=True)
twiddle = twiddle_list_concat(B).unsqueeze(0)
output_intermediate = butterfly_multiply_intermediate(twiddle, input_)
output = [input_]
for factor in B.factors[::-1]:
output.append(butterfly_factor_mult(factor.ABCD, output[-1].view(-1, 2, factor.size // 2)).view(output[-1].shape))
output = torch.stack(output)
self.assertTrue(torch.allclose(output_intermediate.squeeze(2), output, rtol=self.rtol, atol=self.atol), (output_intermediate.squeeze(2) - output).abs().max().item())
grad = torch.randn_like(output[-1])
d_twiddle_intermediate, d_input_intermediate = butterfly_multiply_intermediate_backward(grad.unsqueeze(1), twiddle, output_intermediate)
output[-1].backward(grad, retain_graph=True)
d_input = input_.grad
d_twiddle = torch.cat([factor.ABCD.grad.permute(2, 0, 1) for factor in B.factors[::-1]])
self.assertTrue(torch.allclose(d_input_intermediate, d_input, rtol=self.rtol, atol=self.atol), (d_input_intermediate - d_input).abs().max().item())
self.assertTrue(torch.allclose(d_twiddle_intermediate, d_twiddle, rtol=self.rtol, atol=self.atol), (d_twiddle_intermediate - d_twiddle).abs().max().item())
def test_butterfly_factor_intermediate_complex_cpu(self):
batch_size = 10
n = 4096
B = Block2x2DiagProduct(n, complex=True)
input_ = torch.randn(batch_size, n, 2, requires_grad=True)
twiddle = twiddle_list_concat(B).unsqueeze(0)
output_intermediate = butterfly_multiply_intermediate(twiddle, input_)
output = [input_]
for factor in B.factors[::-1]:
output.append(butterfly_factor_mult(factor.ABCD, output[-1].view(-1, 2, factor.size // 2, 2)).view(output[-1].shape))
output = torch.stack(output)
self.assertTrue(torch.allclose(output_intermediate.squeeze(2), output, rtol=self.rtol, atol=self.atol), (output_intermediate.squeeze(2) - output).abs().max().item())
grad = torch.randn_like(output[-1])
d_twiddle_intermediate, d_input_intermediate = butterfly_multiply_intermediate_backward(grad.unsqueeze(1), twiddle, output_intermediate)
output[-1].backward(grad, retain_graph=True)
d_input = input_.grad
d_twiddle = torch.cat([factor.ABCD.grad.permute(2, 0, 1, 3) for factor in B.factors[::-1]])
self.assertTrue(torch.allclose(d_input_intermediate, d_input, rtol=self.rtol, atol=self.atol), (d_input_intermediate - d_input).abs().max().item())
self.assertTrue(torch.allclose(d_twiddle_intermediate, d_twiddle, rtol=self.rtol, atol=self.atol), (d_twiddle_intermediate - d_twiddle).abs().max().item())
@unittest.skipIf(not torch.cuda.is_available(), "need CUDA")
def test_butterfly_factor_intermediate_cuda(self):
batch_size = 10
n = 4096
B = Block2x2DiagProduct(n).to('cuda')
input_ = torch.randn(batch_size, n, device='cuda', requires_grad=True)
twiddle = twiddle_list_concat(B).unsqueeze(0)
output_intermediate = butterfly_multiply_intermediate(twiddle, input_)
output = [input_]
for factor in B.factors[::-1]:
output.append(butterfly_factor_mult(factor.ABCD, output[-1].view(-1, 2, factor.size // 2)).view(output[-1].shape))
output = torch.stack(output)
self.assertTrue(torch.allclose(output_intermediate.squeeze(2), output, rtol=self.rtol, atol=self.atol), (output_intermediate.squeeze(2) - output).abs().max().item())
grad = torch.randn_like(output[-1])
d_twiddle_intermediate, d_input_intermediate = butterfly_multiply_intermediate_backward(grad.unsqueeze(1), twiddle, output_intermediate)
output[-1].backward(grad, retain_graph=True)
d_input = input_.grad
d_twiddle = torch.cat([factor.ABCD.grad.permute(2, 0, 1) for factor in B.factors[::-1]])
self.assertTrue(torch.allclose(d_input_intermediate, d_input, rtol=self.rtol, atol=self.atol), (d_input_intermediate - d_input).abs().max().item())
self.assertTrue(torch.allclose(d_twiddle_intermediate, d_twiddle, rtol=self.rtol, atol=self.atol), (d_twiddle_intermediate - d_twiddle).abs().max().item())
@unittest.skipIf(not torch.cuda.is_available(), "need CUDA")
def test_butterfly_factor_intermediate_complex_cuda(self):
batch_size = 10
n = 4096
B = Block2x2DiagProduct(n, complex=True).to('cuda')
input_ = torch.randn(batch_size, n, 2, device='cuda', requires_grad=True)
twiddle = twiddle_list_concat(B).unsqueeze(0)
output_intermediate = butterfly_multiply_intermediate(twiddle, input_)
output = [input_]
for factor in B.factors[::-1]:
output.append(butterfly_factor_mult(factor.ABCD, output[-1].view(-1, 2, factor.size // 2, 2)).view(output[-1].shape))
output = torch.stack(output)
self.assertTrue(torch.allclose(output_intermediate.squeeze(2), output, rtol=self.rtol, atol=self.atol), (output_intermediate.squeeze(2) - output).abs().max().item())
grad = torch.randn_like(output[-1])
d_twiddle_intermediate, d_input_intermediate = butterfly_multiply_intermediate_backward(grad.unsqueeze(1), twiddle, output_intermediate)
output[-1].backward(grad, retain_graph=True)
d_input = input_.grad
d_twiddle = torch.cat([factor.ABCD.grad.permute(2, 0, 1, 3) for factor in B.factors[::-1]])
self.assertTrue(torch.allclose(d_input_intermediate, d_input, rtol=self.rtol, atol=self.atol), (d_input_intermediate - d_input).abs().max().item())
self.assertTrue(torch.allclose(d_twiddle_intermediate, d_twiddle, rtol=self.rtol, atol=self.atol), (d_twiddle_intermediate - d_twiddle).abs().max().item())
if __name__ == "__main__":
unittest.main()
# batch_size = 2
# n = 4
# B = Block2x2DiagProduct(n).to('cuda')
# # input_ = torch.randn(batch_size, n, device='cuda', requires_grad=True)
# input_ = torch.arange(batch_size * n, dtype=torch.float, device='cuda', requires_grad=True).view(batch_size, n)
# output = input_
# factor = B.factors[0]
# prev = output
# output = butterfly_factor_mult(factor.ABCD, output.view(-1, 2, factor.size // 2)).view(prev.shape)
# output_slow = ((factor.ABCD * prev.view(-1, 1, 2, factor.size // 2)).sum(dim=-2)).view(prev.shape)
# grad = input_
# d_twiddle, d_input = torch.autograd.grad(output, (factor.ABCD, prev), grad, retain_graph=True)
# d_twiddle_slow, d_input_slow = torch.autograd.grad(output_slow, (factor.ABCD, prev), grad, retain_graph=True)
# print(d_twiddle)
# print(d_twiddle_slow)
# print((factor.size, (d_twiddle - d_twiddle_slow).abs().max().item()))
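# Editorial aside (not part of the original tests): a standalone toy version
# of the "slow" reference contraction used throughout, for a single real-
# valued butterfly factor. ABCD has shape (2, 2, n/2): one 2x2 mixing block
# per pair of strided input halves.
def _single_factor_reference_demo():
    batch, n = 3, 8
    x = torch.randn(batch, n)
    abcd = torch.randn(2, 2, n // 2)
    # broadcast (2, 2, n/2) against (batch, 1, 2, n/2), mix the two halves,
    # then flatten back to (batch, n) - exactly the output_slow expression.
    y = (abcd * x.view(-1, 1, 2, n // 2)).sum(dim=-2).view(batch, n)
    return y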
| 62.404372
| 173
| 0.681961
| 1,569
| 11,420
| 4.745698
| 0.060548
| 0.051571
| 0.029546
| 0.076148
| 0.915794
| 0.905721
| 0.902095
| 0.901961
| 0.890948
| 0.884502
| 0
| 0.019889
| 0.176708
| 11,420
| 182
| 174
| 62.747253
| 0.77207
| 0.078634
| 0
| 0.791667
| 0
| 0
| 0.005618
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 1
| 0.0625
| false
| 0
| 0.041667
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
34b7960d9a7f0b7402b59e09c1c4e321573c2bbf
| 388
|
py
|
Python
|
main/pcse/soil/__init__.py
|
jajberni/pcse_web
|
284b35270061fee61040f41df419cbf9eea32a2e
|
[
"Apache-2.0"
] | 3
|
2017-09-19T10:38:50.000Z
|
2019-10-07T03:47:02.000Z
|
main/pcse/soil/__init__.py
|
jajberni/pcse_web
|
284b35270061fee61040f41df419cbf9eea32a2e
|
[
"Apache-2.0"
] | null | null | null |
main/pcse/soil/__init__.py
|
jajberni/pcse_web
|
284b35270061fee61040f41df419cbf9eea32a2e
|
[
"Apache-2.0"
] | 1
|
2019-10-31T01:11:06.000Z
|
2019-10-31T01:11:06.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2004-2014 Alterra, Wageningen-UR
# Allard de Wit (allard.dewit@wur.nl), April 2014
from .classic_waterbalance import WaterbalancePP
from .classic_waterbalance import WaterbalanceFD
from .classic_waterbalance import WaterbalanceFDSnow
from .snowmaus import SnowMAUS
from .waterbalance import WaterbalanceLayered
from .lintul3soil import Lintul3Soil
| 38.8
| 52
| 0.819588
| 46
| 388
| 6.847826
| 0.586957
| 0.228571
| 0.219048
| 0.27619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043353
| 0.108247
| 388
| 9
| 53
| 43.111111
| 0.867052
| 0.298969
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
550dfb3e568867b1620b1542cb448febace2d181
| 113
|
py
|
Python
|
module1.py
|
JaeGyu/PythonEx_1
|
e67053db6ca7431c3dd66351c190c53229e3f141
|
[
"MIT"
] | null | null | null |
module1.py
|
JaeGyu/PythonEx_1
|
e67053db6ca7431c3dd66351c190c53229e3f141
|
[
"MIT"
] | null | null | null |
module1.py
|
JaeGyu/PythonEx_1
|
e67053db6ca7431c3dd66351c190c53229e3f141
|
[
"MIT"
] | null | null | null |
import singletone
print(singletone.only_one_var)
singletone.only_one_var += " after modification"
import module2
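The point of this demo is that Python caches imported modules in sys.modules, so module-level state behaves like a singleton: by the time module2 imports singletone, it sees the string already modified above. A minimal sketch of the two companion files this script assumes (their real contents are not shown here):

# singletone.py (assumed contents)
only_one_var = "initial value"

# module2.py (assumed contents)
import singletone
print(singletone.only_one_var)  # prints the value with " after modification" appended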
| 22.6
| 48
| 0.840708
| 15
| 113
| 6.066667
| 0.6
| 0.307692
| 0.373626
| 0.43956
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009709
| 0.088496
| 113
| 4
| 49
| 28.25
| 0.873786
| 0
| 0
| 0
| 0
| 0
| 0.168142
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.25
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
9b4c48b28ba43e08118749081ebe4e816e76bc55
| 33,674
|
py
|
Python
|
u3v2 climas funcionando/Juego/antiguomovparabolico.py
|
Muteado/proyecto
|
54cf8babe150a33f75851f6686094de8f743d332
|
[
"MIT"
] | null | null | null |
u3v2 climas funcionando/Juego/antiguomovparabolico.py
|
Muteado/proyecto
|
54cf8babe150a33f75851f6686094de8f743d332
|
[
"MIT"
] | null | null | null |
u3v2 climas funcionando/Juego/antiguomovparabolico.py
|
Muteado/proyecto
|
54cf8babe150a33f75851f6686094de8f743d332
|
[
"MIT"
] | null | null | null |
'''
class Lanzamiento:
# ------------------------------
# Main game function
# ------------------------------
def lanzamiento(botonamarillo,botonnaranja,botonmorado,aux):
# set the default font
fuente = Textos.fuentes(None, 50)
prueba = 0
# create a projectile to fire
if Turno[0] == 1:
#EleccionbalaAzul[0] = int(input("1. 105 mm \n2. armor-piercing \n3. 60 mm \nEnter your shell: "))
Turnos.balasturnos(balaspj1)
if Turno[0] == 2:
#EleccionbalaRojo[0] = int(input("1. 105 mm \n2. armor-piercing \n3. 60 mm \nEnter your shell: "))
Turnos.balasturnos(balaspj2)
if Turno[0] == 1:
Movimiento.angulos(0,1,Angulo_Azul[0], Velocidad_Azul[0])
elif Turno[0] == 2:
# Red tank
angulo = Angulo_Azul[0]
if angulo < 90:
bala = Proyectil(X_Y_Tanques[2]+10, X_Y_Tanques[3], Angulo_Azul[0], Velocidad_Azul[0])  # velocity, angle
if angulo == 90:
bala = Proyectil(X_Y_Tanques[2], X_Y_Tanques[3]-10, Angulo_Azul[0], Velocidad_Azul[0])  # velocity, angle
if angulo > 90:
bala = Proyectil(X_Y_Tanques[2]-10, X_Y_Tanques[3], Angulo_Azul[0], Velocidad_Azul[0])  # velocity, angle
clock = pygame.time.Clock()
bala.disparar = aux
# the main game loop
while True:
# record how much time has passed since the last tick
tick = clock.tick(60)
# possible keyboard and mouse input
if bala.disparar is True:
# add the elapsed time to the previous time
bala.tiempo = bala.tiempo + (tick / 200.0)
# update the projectile's position and info
bala.update(bala.xUsar,bala.yUsar)
if prueba < bala.yreal:
prueba = bala.yreal
if Turno[0] == 1:
if Angulo_Azul[0] >= 90:
text = "Metros = %d m Altura = %d m" % (
XdelTank[0]-bala.xreal, prueba)
elif Angulo_Azul[0] < 90:
text = "Metros = %d m Altura = %d m" % (
bala.xreal-XdelTank[0], prueba)
if Turno[0] == 2:
if Angulo_Rojo[0] >= 90:
text = "Metros = %d m Altura = %d m" % (
XdelTank[1]-bala.xreal, prueba)
elif Angulo_Rojo[0] < 90:
text = "Metros = %d m Altura = %d m" % (
bala.xreal-XdelTank[1], prueba)
mensaje = fuente.render(text, 600, Negro)
fuente = pygame.font.Font(None,50)
if bala.disparar == True:
#if vidaTank[0] >= 0 and vidaTank[1] >= 0:
if (int(bala.y)+11 >= ancho) or (int(bala.x)+11 >= largo) or (int(bala.y) <= 0) or (int(bala.x) <= 0):
print("Tu disparo no sirvio")
bala.disparar = False
Terreno.dibuja_mapa(Pant,mapa)
if Turno[0] == 1:
Turno[0] = 2
elif Turno[0] == 2:
Turno[0] = 1
break
# check whether the 105 mm shell hits the terrain
elif botonamarillo == True:
# it's the blue tank's turn
if Turno[0] == 1:
# check for a hit on the blue tank
if mapa[int(bala.y)][int(bala.x)+10] == 2 or mapa[int(bala.y)][int(bala.x)-10] == 2 or mapa[int(bala.y)+10][int(bala.x)] == 2 or mapa[int(bala.y)-10][int(bala.x)] == 2:
print("cayó en el tanque azul")
#Partida[0] = 1
#bala.disparar = False
#bala = Proyectil(300, 300, angulo, velocidad)#velocidad,angulo
Terreno.dibuja_mapa(Pant,mapa)
if vidaTank[0] > 0:
if Turno[0] == 1:
vidaTank[0] = vidaTank[0] - Balaaux[0]
#print("La vida del Azul es: ",vidaTank[0])
Turno[0] = 2
break
if Turno[0] == 2:
vidaTank[0] = vidaTank[0] - Balaaux[0]
#print("La vida del Azul es: ",vidaTank[0])
Turno[0] = 1
break
# check whether the shell hit the red tank
elif mapa[int(bala.y)][int(bala.x)+10] == 3 or mapa[int(bala.y)][int(bala.x)-10] == 3 or mapa[int(bala.y)+10][int(bala.x)] == 3 or mapa[int(bala.y)-10][int(bala.x)] == 3:
print("cayó en el tanque rojo")
#Partida[0] = 2
#bala.disparar = False
#bala = Proyectil(300, 300, angulo, velocidad)#velocidad,angulo
Terreno.dibuja_mapa(Pant,mapa)
if vidaTank[1] > 0:
if Turno[0] == 1:
vidaTank[1] = vidaTank[1] - Balaaux[0]
#print("La vida del rojo es: ",vidaTank[1])
Turno[0] = 2
break
if Turno[0] == 2:
vidaTank[1] = vidaTank[1] - Balaaux[0]
#print("La vida del rojo es: ",vidaTank[1])
Turno[0] = 1
break
# check whether the shell hit the terrain
elif mapa[int(bala.y)][int(bala.x)+10] == 1 or mapa[int(bala.y)][int(bala.x)-10] == 1 or mapa[int(bala.y)+10][int(bala.x)] == 1:
pygame.draw.circle(Pant, Amarillo, (int(bala.x), int(bala.y)), 10)
# carve the crater for the 105 mm shell
aux2 = -2
aux1 = -2
while aux1 <= 50:
while aux2 <= 40:
if (int(bala.y)+aux1) < ancho:
if (int(bala.x)+aux2 < largo):
if mapa[int(bala.y)+aux1][int(bala.x)+aux2] != 2 and mapa[int(bala.y)+aux1][int(bala.x)+aux2] != 3:
mapa[int(bala.y)+aux1][int(bala.x)+aux2] = 0
if (int(bala.x)-aux2 < largo):
if mapa[int(bala.y)+aux1][int(bala.x)-aux2] != 2 and mapa[int(bala.y)+aux1][int(bala.x)-aux2] != 3:
mapa[int(bala.y)+aux1][int(bala.x)-aux2] = 0
aux2 += 1
aux2 = 0
aux1 += 1
pygame.display.update()
print("cayó en el suelo")
bala.disparar = False
Terreno.dibuja_mapa(Pant,mapa)
if Turno[0] == 1:
Turno[0] = 2
elif Turno[0] == 2:
Turno[0] = 1
break
# otherwise the shell is still in the air and keeps flying
elif mapa[int(bala.y)][int(bala.x)+10] == 0 or mapa[int(bala.y)+10][int(bala.x)] == 0 or mapa[int(bala.y)][int(bala.x)-10] == 0:
pygame.draw.circle(Pant, Amarillo, (int(bala.x), int(bala.y)), 10)
pygame.display.update()
# it's the red player's turn
elif Turno[0] == 2:
# check for a hit on the blue tank
if mapa[int(bala.y)][int(bala.x)+10] == 2 or mapa[int(bala.y)+10][int(bala.x)] == 2:
print("cayó en el tanque azul")
#Partida[0] = 1
#bala.disparar = False
#bala = Proyectil(300, 300, angulo, velocidad)#velocidad,angulo
Terreno.dibuja_mapa(Pant,mapa)
if vidaTank[0] > 0:
if Turno[0] == 1:
vidaTank[0] = vidaTank[0] - Balaaux[0]
#print("La vida del Azul es: ",vidaTank[0])
Turno[0] = 2
break
if Turno[0] == 2:
vidaTank[0] = vidaTank[0] - Balaaux[0]
#print("La vida del Azul es: ",vidaTank[0])
Turno[0] = 1
break
# check whether the shell hit the red tank
elif mapa[int(bala.y)][int(bala.x)+10] == 3 or mapa[int(bala.y)+10][int(bala.x)] == 3:
print("cayó en el tanque rojo")
#Partida[0] = 2
#bala.disparar = False
#bala = Proyectil(300, 300, angulo, velocidad)#velocidad,angulo
Terreno.dibuja_mapa(Pant,mapa)
if vidaTank[1] > 0:
if Turno[0] == 1:
vidaTank[1] = vidaTank[1] - Balaaux[0]
#print("La vida del rojo es: ",vidaTank[1])
Turno[0] = 2
break
if Turno[0] == 2:
vidaTank[1] = vidaTank[1] - Balaaux[0]
#print("La vida del rojo es: ",vidaTank[1])
Turno[0] = 1
break
# check whether the shell hit the terrain
elif mapa[int(bala.y)][int(bala.x)+10] == 1 or mapa[int(bala.y)+10][int(bala.x)] == 1 or mapa[int(bala.y)][int(bala.x)-10] == 1:
pygame.draw.circle(Pant, Amarillo, (int(bala.x), int(bala.y)), 10)
# carve the crater for the 105 mm shell
aux2 = -2
aux1 = -2
while aux1 <= 50:
while aux2 <= 40:
if (int(bala.y)+aux1) < ancho:
if (int(bala.x)+aux2 < largo):
if mapa[int(bala.y)+aux1][int(bala.x)+aux2] != 2 and mapa[int(bala.y)+aux1][int(bala.x)+aux2] != 3:
mapa[int(bala.y)+aux1][int(bala.x)+aux2] = 0
if (int(bala.x)-aux2 < largo):
if mapa[int(bala.y)+aux1][int(bala.x)-aux2] != 2 and mapa[int(bala.y)+aux1][int(bala.x)-aux2] != 3:
mapa[int(bala.y)+aux1][int(bala.x)-aux2] = 0
aux2 += 1
aux2 = 0
aux1 += 1
pygame.display.update()
print("cayó en el suelo")
bala.disparar = False
Terreno.dibuja_mapa(Pant,mapa)
if Turno[0] == 1:
Turno[0] = 2
elif Turno[0] == 2:
Turno[0] = 1
break
# otherwise the shell is still in the air and keeps flying
elif mapa[int(bala.y)][int(bala.x)+10] == 0 or mapa[int(bala.y)+10][int(bala.x)] == 0 or mapa[int(bala.y)][int(bala.x)-10] == 0:
pygame.draw.circle(Pant, Amarillo, (int(bala.x), int(bala.y)), 10)
pygame.display.update()
# check whether the armor-piercing shell hit the terrain
elif botonnaranja == True:
if Turno[0] == 1:
# check for a hit on the blue tank
if mapa[int(bala.y)][int(bala.x)+7] == 2 or mapa[int(bala.y)+7][int(bala.x)] == 2 or mapa[int(bala.y)][int(bala.x)-7] == 2:
print("cayó en el tanque azul")
#Partida[0] = 1
#bala.disparar = False
#bala = Proyectil(300, 300, angulo, velocidad)#velocidad,angulo
Terreno.dibuja_mapa(Pant,mapa)
if vidaTank[0] > 0:
if Turno[0] == 1:
vidaTank[0] = vidaTank[0] - Balaaux[0]
#print("La vida del Azul es: ",vidaTank[0])
Turno[0] = 2
break
if Turno[0] == 2:
vidaTank[0] = vidaTank[0] - Balaaux[0]
#print("La vida del Azul es: ",vidaTank[0])
Turno[0] = 1
break
# check whether the shell hit the red tank
elif mapa[int(bala.y)][int(bala.x)+7] == 3 or mapa[int(bala.y)+7][int(bala.x)] == 3 or mapa[int(bala.y)][int(bala.x)-7] == 3:
print("cayó en el tanque rojo")
#Partida[0] = 2
#bala.disparar = False
#bala = Proyectil(300, 300, angulo, velocidad)#velocidad,angulo
Terreno.dibuja_mapa(Pant,mapa)
if vidaTank[1] > 0:
if Turno[0] == 1:
vidaTank[1] = vidaTank[1] - Balaaux[0]
#print("La vida del rojo es: ",vidaTank[1])
Turno[0] = 2
break
if Turno[0] == 2:
vidaTank[1] = vidaTank[1] - Balaaux[0]
#print("La vida del rojo es: ",vidaTank[1])
Turno[0] = 1
break
# check whether the shell hit the terrain
elif mapa[int(bala.y)][int(bala.x)+7] == 1 or mapa[int(bala.y)+7][int(bala.x)] == 1 or mapa[int(bala.y)][int(bala.x)-7] == 1:
pygame.draw.circle(Pant, Naranja, (int(bala.x), int(bala.y)), 7)
# carve the crater for the armor-piercing shell
aux2 = -2
aux1 = -2
while aux1 <= 40:
while aux2 <= 30:
if (int(bala.y)+aux1) < ancho:
if (int(bala.x)+aux2 < largo):
if mapa[int(bala.y)+aux1][int(bala.x)+aux2] != 2 and mapa[int(bala.y)+aux1][int(bala.x)+aux2] != 3:
mapa[int(bala.y)+aux1][int(bala.x)+aux2] = 0
if (int(bala.x)-aux2 < largo):
if mapa[int(bala.y)+aux1][int(bala.x)-aux2] != 2 and mapa[int(bala.y)+aux1][int(bala.x)-aux2] != 3:
mapa[int(bala.y)+aux1][int(bala.x)-aux2] = 0
aux2 += 1
aux2 = 0
aux1 += 1
pygame.display.update()
print("cayó en el suelo")
bala.disparar = False
Terreno.dibuja_mapa(Pant,mapa)
if Turno[0] == 1:
Turno[0] = 2
elif Turno[0] == 2:
Turno[0] = 1
break
# otherwise the shell is still in the air and keeps flying
elif mapa[int(bala.y)][int(bala.x)+7] == 0 or mapa[int(bala.y)+7][int(bala.x)] == 0 or mapa[int(bala.y)][int(bala.x)-7] == 0:
pygame.draw.circle(Pant, Naranja, (int(bala.x), int(bala.y)), 7)
pygame.display.update()
elif Turno[0] == 2:
# check for a hit on the blue tank
if mapa[int(bala.y)][int(bala.x)+7] == 2 or mapa[int(bala.y)+7][int(bala.x)] == 2 or mapa[int(bala.y)][int(bala.x)-7] == 2:
print("cayó en el tanque azul")
#Partida[0] = 1
#bala.disparar = False
#bala = Proyectil(300, 300, angulo, velocidad)#velocidad,angulo
Terreno.dibuja_mapa(Pant,mapa)
if vidaTank[0] > 0:
if Turno[0] == 1:
vidaTank[0] = vidaTank[0] - Balaaux[0]
#print("La vida del Azul es: ",vidaTank[0])
Turno[0] = 2
break
if Turno[0] == 2:
vidaTank[0] = vidaTank[0] - Balaaux[0]
#print("La vida del Azul es: ",vidaTank[0])
Turno[0] = 1
break
# check whether the shell hit the red tank
elif mapa[int(bala.y)][int(bala.x)+7] == 3 or mapa[int(bala.y)+7][int(bala.x)] == 3 or mapa[int(bala.y)][int(bala.x)-7] == 3:
print("cayó en el tanque rojo")
#Partida[0] = 2
#bala.disparar = False
#bala = Proyectil(300, 300, angulo, velocidad)#velocidad,angulo
Terreno.dibuja_mapa(Pant,mapa)
if vidaTank[1] > 0:
if Turno[0] == 1:
vidaTank[1] = vidaTank[1] - Balaaux[0]
#print("La vida del rojo es: ",vidaTank[1])
Turno[0] = 2
break
if Turno[0] == 2:
vidaTank[1] = vidaTank[1] - Balaaux[0]
#print("La vida del rojo es: ",vidaTank[1])
Turno[0] = 1
break
# check whether the shell hit the terrain
elif mapa[int(bala.y)][int(bala.x)+7] == 1 or mapa[int(bala.y)+7][int(bala.x)] == 1 or mapa[int(bala.y)][int(bala.x)-7] == 1:
pygame.draw.circle(Pant, Naranja, (int(bala.x), int(bala.y)), 7)
# carve the crater for the armor-piercing shell
aux2 = -2
aux1 = -2
while aux1 <= 40:
while aux2 <= 30:
if (int(bala.y)+aux1) < ancho:
if (int(bala.x)+aux2 < largo):
if mapa[int(bala.y)+aux1][int(bala.x)+aux2] != 2 and mapa[int(bala.y)+aux1][int(bala.x)+aux2] != 3:
mapa[int(bala.y)+aux1][int(bala.x)+aux2] = 0
if (int(bala.x)-aux2 < largo):
if mapa[int(bala.y)+aux1][int(bala.x)-aux2] != 2 and mapa[int(bala.y)+aux1][int(bala.x)-aux2] != 3:
mapa[int(bala.y)+aux1][int(bala.x)-aux2] = 0
aux2 += 1
aux2 = 0
aux1 += 1
pygame.display.update()
print("cayó en el suelo")
bala.disparar = False
Terreno.dibuja_mapa(Pant,mapa)
if Turno[0] == 1:
Turno[0] = 2
elif Turno[0] == 2:
Turno[0] = 1
break
# otherwise the shell is still in the air and keeps flying
elif mapa[int(bala.y)][int(bala.x)+7] == 0 or mapa[int(bala.y)-7][int(bala.x)] == 0 or mapa[int(bala.y)][int(bala.x)-7] == 0:
pygame.draw.circle(Pant, Naranja, (int(bala.x), int(bala.y)), 7)
pygame.display.update()
# check whether the 60 mm shell hit the terrain
elif botonmorado == True:
if Turno[0] == 1:
# check for a hit on the blue tank
if mapa[int(bala.y)][int(bala.x)+5] == 2 or mapa[int(bala.y)+5][int(bala.x)] == 2 or mapa[int(bala.y)][int(bala.x)-5] == 2:
print("cayó en el tanque azul")
#Partida[0] = 1
#bala.disparar = False
#bala = Proyectil(300, 300, angulo, velocidad)#velocidad,angulo
Terreno.dibuja_mapa(Pant,mapa)
if vidaTank[0] > 0:
if Turno[0] == 1:
vidaTank[0] = vidaTank[0] - Balaaux[0]
#print("La vida del Azul es: ",vidaTank[0])
Turno[0] = 2
break
if Turno[0] == 2:
vidaTank[0] = vidaTank[0] - Balaaux[0]
#print("La vida del Azul es: ",vidaTank[0])
Turno[0] = 1
break
# check whether the shell hit the red tank
elif mapa[int(bala.y)][int(bala.x)+5] == 3 or mapa[int(bala.y)+5][int(bala.x)] == 3 or mapa[int(bala.y)][int(bala.x)-5] == 3:
print("cayó en el tanque rojo")
#Partida[0] = 2
#bala.disparar = False
#bala = Proyectil(300, 300, angulo, velocidad)#velocidad,angulo
Terreno.dibuja_mapa(Pant,mapa)
if vidaTank[1] > 0:
if Turno[0] == 1:
vidaTank[1] = vidaTank[1] - Balaaux[0]
#print("La vida del rojo es: ",vidaTank[1])
Turno[0] = 2
break
if Turno[0] == 2:
vidaTank[1] = vidaTank[1] - Balaaux[0]
#print("La vida del rojo es: ",vidaTank[1])
Turno[0] = 1
break
# check whether the shell hit the terrain
elif mapa[int(bala.y)][int(bala.x)+5] == 1 or mapa[int(bala.y)+5][int(bala.x)] == 1 or mapa[int(bala.y)][int(bala.x)-5] == 1:
pygame.draw.circle(Pant, Morado, (int(bala.x), int(bala.y)), 5)
# carve the crater for the 60 mm shell
aux2 = -2
aux1 = -2
while aux1 <= 30:
while aux2 <= 20:
if (int(bala.y)+aux1) < ancho:
if (int(bala.x)+aux2 < largo):
if mapa[int(bala.y)+aux1][int(bala.x)+aux2] != 2 and mapa[int(bala.y)+aux1][int(bala.x)+aux2] != 3:
mapa[int(bala.y)+aux1][int(bala.x)+aux2] = 0
if (int(bala.x)-aux2 < largo):
if mapa[int(bala.y)+aux1][int(bala.x)-aux2] != 2 and mapa[int(bala.y)+aux1][int(bala.x)-aux2] != 3:
mapa[int(bala.y)+aux1][int(bala.x)-aux2] = 0
aux2 += 1
aux2 = 0
aux1 += 1
pygame.display.update()
print("cayó en el suelo")
bala.disparar = False
Terreno.dibuja_mapa(Pant,mapa)
if Turno[0] == 1:
Turno[0] = 2
elif Turno[0] == 2:
Turno[0] = 1
break
# otherwise the shell is still in the air and keeps flying
elif mapa[int(bala.y)][int(bala.x)+5] == 0 or mapa[int(bala.y)+5][int(bala.x)] == 0 or mapa[int(bala.y)][int(bala.x)-5] == 0:
pygame.draw.circle(Pant, Morado, (int(bala.x), int(bala.y)), 5)
pygame.display.update()
elif Turno[0] == 2:
# check for a hit on the blue tank
if mapa[int(bala.y)][int(bala.x)+5] == 2 or mapa[int(bala.y)+5][int(bala.x)] == 2 or mapa[int(bala.y)][int(bala.x)-5] == 2:
print("cayó en el tanque azul")
#Partida[0] = 1
#bala.disparar = False
#bala = Proyectil(300, 300, angulo, velocidad)#velocidad,angulo
Terreno.dibuja_mapa(Pant,mapa)
if vidaTank[0] > 0:
if Turno[0] == 1:
vidaTank[0] = vidaTank[0] - Balaaux[0]
#print("La vida del Azul es: ",vidaTank[0])
Turno[0] = 2
break
if Turno[0] == 2:
vidaTank[0] = vidaTank[0] - Balaaux[0]
#print("La vida del Azul es: ",vidaTank[0])
Turno[0] = 1
break
# check whether the shell hit the red tank
elif mapa[int(bala.y)][int(bala.x)+5] == 3 or mapa[int(bala.y)+5][int(bala.x)] == 3 or mapa[int(bala.y)][int(bala.x)-5] == 3:
print("cayó en el tanque rojo")
#Partida[0] = 2
#bala.disparar = False
#bala = Proyectil(300, 300, angulo, velocidad)#velocidad,angulo
Terreno.dibuja_mapa(Pant,mapa)
if vidaTank[1] > 0:
if Turno[0] == 1:
vidaTank[1] = vidaTank[1] - Balaaux[0]
#print("La vida del rojo es: ",vidaTank[1])
Turno[0] = 2
break
if Turno[0] == 2:
vidaTank[1] = vidaTank[1] - Balaaux[0]
#print("La vida del rojo es: ",vidaTank[1])
Turno[0] = 1
break
# check whether the shell hit the terrain
elif mapa[int(bala.y)][int(bala.x)+5] == 1 or mapa[int(bala.y)+5][int(bala.x)] == 1 or mapa[int(bala.y)][int(bala.x)-5] == 1:
pygame.draw.circle(Pant, Morado, (int(bala.x), int(bala.y)), 5)
# carve the crater for the 60 mm shell
aux2 = -2
aux1 = -2
while aux1 <= 30:
while aux2 <= 20:
if (int(bala.y)+aux1) < ancho:
if (int(bala.x)+aux2 < largo):
if mapa[int(bala.y)+aux1][int(bala.x)+aux2] != 2 and mapa[int(bala.y)+aux1][int(bala.x)+aux2] != 3:
mapa[int(bala.y)+aux1][int(bala.x)+aux2] = 0
if (int(bala.x)-aux2 < largo):
if mapa[int(bala.y)+aux1][int(bala.x)-aux2] != 2 and mapa[int(bala.y)+aux1][int(bala.x)-aux2] != 3:
mapa[int(bala.y)+aux1][int(bala.x)-aux2] = 0
aux2 += 1
aux2 = 0
aux1 += 1
pygame.display.update()
print("cayó en el suelo")
bala.disparar = False
Terreno.dibuja_mapa(Pant,mapa)
if Turno[0] == 1:
Turno[0] = 2
elif Turno[0] == 2:
Turno[0] = 1
break
# otherwise the shell is still in the air and keeps flying
elif mapa[int(bala.y)][int(bala.x)+5] == 0 or mapa[int(bala.y)+5][int(bala.x)] == 0 or mapa[int(bala.y)][int(bala.x)-5] == 0:
pygame.draw.circle(Pant, Morado, (int(bala.x), int(bala.y)), 5)
pygame.display.update()
# refresh the screen
pygame.display.update()
posicion()
if vidaTank[0] <= 0:
print("Perdió: Tanque Azul")
Partida[0] = 1
if vidaTank[1] <= 0:
print("Perdió: Tanque Rojo")
Partida[0] = 2
Pant.blit(mensaje, (400, 50))
'''
pass
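The module name suggests "old parabolic movement": the commented-out loop above advances the shell along a ballistic arc via bala.update. A self-contained sketch of the underlying kinematics (an illustration, not the Proyectil class itself), noting that pygame's y axis grows downward so screen code flips the vertical term:

import math

def parabolic_position(x0, y0, speed, angle_deg, t, g=9.81):
    # classic projectile motion in conventional coordinates; negate the
    # vertical terms when mapping to pygame screen coordinates
    theta = math.radians(angle_deg)
    x = x0 + speed * math.cos(theta) * t
    y = y0 + speed * math.sin(theta) * t - 0.5 * g * t * t
    return x, y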
| 53.621019
| 195
| 0.340055
| 3,317
| 33,674
| 3.438951
| 0.053663
| 0.160778
| 0.093977
| 0.113614
| 0.900763
| 0.890243
| 0.888051
| 0.888051
| 0.885246
| 0.885246
| 0
| 0.060919
| 0.553959
| 33,674
| 628
| 196
| 53.621019
| 0.698535
| 0.980578
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 10
|
9b5c7fc3490cff3658e9cf5065e84822aebddca2
| 7,552
|
py
|
Python
|
tests/test_main.py
|
henrysky/simple_tf_raytracing
|
cba18dd544436f1ee44f1e9d064fd3e9e02e7dcb
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
henrysky/simple_tf_raytracing
|
cba18dd544436f1ee44f1e9d064fd3e9e02e7dcb
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
henrysky/simple_tf_raytracing
|
cba18dd544436f1ee44f1e9d064fd3e9e02e7dcb
|
[
"MIT"
] | null | null | null |
import unittest
from tfrt import *
# explicit imports in case tfrt's star import does not re-export these names
import numpy as np
import tensorflow as tf
import numpy.testing as npt
class MyTestCase(unittest.TestCase):
def test_pyramidsarray(self):
pyramidss = PyramidArray(tf.constant([0., 0., 0.]), 1, 0.5, (4, 4), reflectivity=0.1)
rays = Ray(p0=tf.constant([[0.2, 0.4, 2.], [0.2, 0.4, -2.], [2., 1.5, 0.5]], dtype=precision),
p1=tf.constant([[0., 0., -1.], [0., 0., 1.], [-1., 0., -1.]], dtype=precision),
intensity=tf.ones(3),
interact_num=tf.zeros(3, dtype=tf.int32))
pt = pyramidss.intersect(rays)
npt.assert_array_almost_equal(pt.p0.numpy(), np.array([[0.2, 0.4, 0.2], [0.2, 0.4, 0.], [1.75, 1.5, 0.25]]))
npt.assert_array_almost_equal(pt.p1.numpy(), np.array([[-1., 0., 0.], [0., 0., -1.], [1., 0., 1.]]))
pt = pyramidss.intersect(pt)
npt.assert_array_almost_equal(pt.p0.numpy(), np.array([[-0.2, 0.4, 0.2], [0.2, 0.4, 0.], [1.75, 1.5, 0.25]]))
npt.assert_array_almost_equal(pt.p1.numpy(), np.array([[0., 0., 1.], [0., 0., -1.], [1., 0., 1.]]))
pt = pyramidss.intersect(pt)
npt.assert_array_almost_equal(pt.p0.numpy(), np.array([[-0.2, 0.4, 0.2], [0.2, 0.4, 0.], [1.75, 1.5, 0.25]]))
npt.assert_array_almost_equal(pt.p1.numpy(), np.array([[0., 0., 1.], [0., 0., -1.], [1., 0., 1.]]))
pt = pyramidss.intersect(pt)
npt.assert_array_almost_equal(pt.p0.numpy(), np.array([[-0.2, 0.4, 0.2], [0.2, 0.4, 0.], [1.75, 1.5, 0.25]]))
npt.assert_array_almost_equal(pt.p1.numpy(), np.array([[0., 0., 1.], [0., 0., -1.], [1., 0., 1.]]))
npt.assert_array_almost_equal(pt.interact_num.numpy(), np.array([2, 1, 1]))
def test_pyramidsspacing(self):
pyramidss = PyramidArray(tf.constant([0., 0., 0.]), 1, 0.5, (4, 4), spacing=1., reflectivity=0.1)
rays = Ray(p0=tf.constant([[0.2, 0.4, 2.]], dtype=precision),
p1=tf.constant([[0., 0., -1.]], dtype=precision),
intensity=tf.ones(1),
interact_num=tf.zeros(1, dtype=tf.int32))
pt = pyramidss.intersect(rays)
pt = pyramidss.intersect(pt)
pt = pyramidss.intersect(pt)
pt = pyramidss.intersect(pt)
npt.assert_array_almost_equal(pt.p0.numpy(), np.array([[0.2, 0.4, 0.]]))
npt.assert_array_almost_equal(pt.p1.numpy(), np.array([[0., 0., 1.]]))
npt.assert_array_almost_equal(pt.interact_num.numpy(), np.array([1]))
pyramidss = PyramidArray(tf.constant([0., 0., 0.]), 1, 0.5, (4, 4), spacing=0.1, reflectivity=0.1)
rays = Ray(p0=tf.constant([[0.2, 0.4, 2.]], dtype=precision),
p1=tf.constant([[0., 0., -1.]], dtype=precision),
intensity=tf.ones(1),
interact_num=tf.zeros(1, dtype=tf.int32))
pt = pyramidss.intersect(rays)
pt = pyramidss.intersect(pt)
pt = pyramidss.intersect(pt)
pt = pyramidss.intersect(pt)
npt.assert_array_almost_equal(pt.p0.numpy(), np.array([[-0.2, 0.4, 0.15]]))
npt.assert_array_almost_equal(pt.p1.numpy(), np.array([[0., 0., 1.]]))
npt.assert_array_almost_equal(pt.interact_num.numpy(), np.array([2]))
def test_cone(self):
cone = Cone(tf.constant([0., 0., 0.]), 1., 1., reflectivity=1)
# test with single ray
rays = Ray(p0=tf.constant([[0., -2., 0.5]], dtype=precision),
p1=tf.constant([[0., 1., 0.]], dtype=precision),
intensity=tf.ones(1),
interact_num=tf.zeros(1, dtype=tf.int32))
pt = cone.intersect(rays)
npt.assert_array_almost_equal(pt.p0.numpy(), np.array([[0., -0.5, 0.5]]))
npt.assert_array_almost_equal(pt.p1.numpy(), np.array([[0., 0., 1.]]))
# test with multiple rays
rays = Ray(p0=tf.constant([[0., -2., 0.5], [0., -2., -0.5], [0.5, 0., 2.], [0.5, 0., 0.7]], dtype=precision),
p1=tf.constant([[0., 1., 0.], [0., 1., 0.], [0., 0., -1.], [0., 0., -1.]], dtype=precision),
intensity=tf.ones(4),
interact_num=tf.zeros(4, dtype=tf.int32))
pt = cone.intersect(rays)
npt.assert_array_almost_equal(pt.p0.numpy(), np.array([[0., -0.5, 0.5], [0., -2., -0.5], [0.5, 0., 0.5],
[0.5, 0., 0.5]]))
npt.assert_array_almost_equal(pt.p1.numpy(), np.array([[0., 0., 1.], [0., 1., 0.], [1., 0., 0.],
[1., 0., 0.]]))
# test with multiple rays from behind
rays = Ray(p0=tf.constant([[0., 2., 0.5], [0., 2., 1.5]], dtype=precision),
p1=tf.constant([[0., -1., 0.], [0., 1., 0.]], dtype=precision),
intensity=tf.ones(2),
interact_num=tf.zeros(2, dtype=tf.int32))
pt = cone.intersect(rays)
npt.assert_array_almost_equal(pt.p0.numpy(), np.array([[0., 0.5, 0.5], [0., 2., 1.5]]))
npt.assert_array_almost_equal(pt.p1.numpy(), np.array([[0., 0., 1.], [0., 1., 0.]]))
def test_conesarray(self):
coness = ConeArray(tf.constant([0., 0., 0.]), 1, 1., (2, 2), reflectivity=1.)
rays = Ray(p0=tf.constant([[0.1, 1., 2.], [0.2, 0.4, -2.], [0.1, 1., .2]], dtype=precision),
p1=tf.constant([[0., 0., -1.], [0., 0., 1.], [0., 0., -1.]], dtype=precision),
intensity=tf.ones(3),
interact_num=tf.zeros(3, dtype=tf.int32))
pt = coness.intersect(rays)
npt.assert_array_almost_equal(pt.p0.numpy(), np.array([[0.1, 1., 0.1], [0.2, 0.4, 0.], [0.1, 1., 0.1]]))
npt.assert_array_almost_equal(pt.p1.numpy(), np.array([[-1., 0., 0.], [0., 0., -1.], [-1., 0., 0.]]))
pt = coness.intersect(pt)
npt.assert_array_almost_equal(pt.p0.numpy(), np.array([[-0.1, 1., 0.1], [0.2, 0.4, 0.], [-0.1, 1., 0.1]]))
npt.assert_array_almost_equal(pt.p1.numpy(), np.array([[0., 0., 1.], [0., 0., -1.], [0., 0., 1.]]))
pt = coness.intersect(pt)
npt.assert_array_almost_equal(pt.p0.numpy(), np.array([[-0.1, 1., 0.1], [0.2, 0.4, 0.], [-0.1, 1., 0.1]]))
npt.assert_array_almost_equal(pt.p1.numpy(), np.array([[0., 0., 1.], [0., 0., -1.], [0., 0., 1.]]))
pt = coness.intersect(pt)
npt.assert_array_almost_equal(pt.p0.numpy(), np.array([[-0.1, 1., 0.1], [0.2, 0.4, 0.], [-0.1, 1., 0.1]]))
npt.assert_array_almost_equal(pt.p1.numpy(), np.array([[0., 0., 1.], [0., 0., -1.], [0., 0., 1.]]))
npt.assert_array_almost_equal(pt.interact_num.numpy(), np.array([2, 1, 2]))
def test_conesdensearray(self):
coness = ConeDenseArray(center=tf.constant([0., 0., 0.]),
radius=1.,
coneheight=1.,
width=4,
height=4,
reflectivity=0.1)
rays = Ray(p0=tf.constant([[0.1, 1., 2.], [0.2, 0.4, -2.], [0.1, 1., .2]], dtype=precision),
p1=tf.constant([[0., 0., -1.], [0., 0., 1.], [0., 0., -1.]], dtype=precision),
intensity=tf.ones(3),
interact_num=tf.zeros(3, dtype=tf.int32))
pt = coness.intersect(rays)
npt.assert_array_almost_equal(pt.p0.numpy(), np.array([[0.1, 1., 0.1], [0.2, 0.4, 0.], [0.1, 1., 0.1]]))
npt.assert_array_almost_equal(pt.p1.numpy(), np.array([[-1., 0., 0.], [0., 0., -1.], [-1., 0., 0.]]))
if __name__ == '__main__':
unittest.main()
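Each test above applies surface.intersect(pt) repeatedly until the ray bundle stops changing, then checks p0, p1, and interact_num. A minimal sketch of that driver pattern, assuming only the intersect(rays) -> rays interface exercised by the tests:

def trace(surface, rays, max_bounces=4):
    # reflect the ray bundle off the surface up to max_bounces times;
    # rays that no longer hit anything come back unchanged
    for _ in range(max_bounces):
        rays = surface.intersect(rays)
    return rays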
| 53.183099
| 117
| 0.504899
| 1,179
| 7,552
| 3.131467
| 0.058524
| 0.047129
| 0.039003
| 0.173348
| 0.88299
| 0.879469
| 0.877573
| 0.86403
| 0.855634
| 0.837216
| 0
| 0.102847
| 0.255826
| 7,552
| 141
| 118
| 53.560284
| 0.554093
| 0.010593
| 0
| 0.546296
| 0
| 0
| 0.001071
| 0
| 0
| 0
| 0
| 0
| 0.296296
| 1
| 0.046296
| false
| 0
| 0.027778
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
32f8c412ea1c12ccbf0235b34e87965d98722dd9
| 17,662
|
py
|
Python
|
gigantumcli/tests/test_server.py
|
gigabackup/gigantum-cli
|
603a61501f842a15edda1ef2f01cf7c835e40043
|
[
"MIT"
] | 14
|
2017-11-10T15:54:20.000Z
|
2020-11-20T12:30:50.000Z
|
gigantumcli/tests/test_server.py
|
gigabackup/gigantum-cli
|
603a61501f842a15edda1ef2f01cf7c835e40043
|
[
"MIT"
] | 31
|
2017-11-10T16:34:38.000Z
|
2021-07-16T12:19:13.000Z
|
gigantumcli/tests/test_server.py
|
gigantum/gigantum-cli
|
6390181f43e1e639105e30d58ed3df92fa049905
|
[
"MIT"
] | 7
|
2017-11-10T16:24:11.000Z
|
2022-01-25T01:29:29.000Z
|
import pytest
import tempfile
import uuid
import os
import shutil
import responses
import click
from gigantumcli.server import ServerConfig
@pytest.fixture
def server_config():
"""Fixture to create a Build instance with a test image name that does not exist and cleanup after"""
unit_test_working_dir = os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
os.mkdir(unit_test_working_dir)
os.makedirs(os.path.join(unit_test_working_dir, '.labmanager', 'identity'))
yield ServerConfig(working_dir=unit_test_working_dir)
shutil.rmtree(unit_test_working_dir)
class TestServerConfig(object):
@responses.activate
def test_server_discovery_fails(self, server_config):
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/discover.json',
json={},
status=404)
responses.add(responses.GET, 'https://test2.gigantum.com/.well-known/discover.json',
json={},
status=404)
with pytest.raises(click.UsageError):
server_config.add_server("test2.gigantum.com")
@responses.activate
def test_auth_discovery_fails(self, server_config):
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/discover.json',
json={},
status=404)
responses.add(responses.GET, 'https://test2.gigantum.com/.well-known/discover.json',
json={"id": 'another-server',
"name": "Another server",
"git_url": "https://test2.repo.gigantum.com/",
"git_server_type": "gitlab",
"hub_api_url": "https://test2.gigantum.com/api/v1/",
"object_service_url": "https://test2.api.gigantum.com/object-v1/",
"user_search_url": "https://user-search2.us-east-1.cloudsearch.amazonaws.com",
"lfs_enabled": True,
"auth_config_url": "https://test2.gigantum.com/.well-known/auth.json"},
status=200)
responses.add(responses.GET, 'https://test2.gigantum.com/.well-known/auth.json',
json={},
status=404)
with pytest.raises(click.UsageError):
server_config.add_server("https://test2.gigantum.com/")
with pytest.raises(click.UsageError):
server_config.add_server("https://thiswillneverwork.gigantum.com/")
@responses.activate
def test_add_server(self, server_config):
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/discover.json',
json={"id": 'another-server',
"name": "Another server",
"git_url": "https://test2.repo.gigantum.com/",
"git_server_type": "gitlab",
"hub_api_url": "https://test2.gigantum.com/api/v1/",
"object_service_url": "https://test2.api.gigantum.com/object-v1/",
"user_search_url": "https://user-search2.us-east-1.cloudsearch.amazonaws.com",
"lfs_enabled": True,
"auth_config_url": "https://test2.gigantum.com/gigantum/.well-known/auth.json"},
status=200)
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/auth.json',
json={"audience": "test2.api.gigantum.io",
"issuer": "https://test2-auth.gigantum.com",
"signing_algorithm": "RS256",
"public_key_url": "https://test2-auth.gigantum.com/.well-known/jwks.json",
"login_url": "https://test2.gigantum.com/client/login",
"login_type": "auth0",
"auth0_client_id": "0000000000000000"},
status=200)
server_id = server_config.add_server("https://test2.gigantum.com/")
assert server_id == 'another-server'
assert os.path.isfile(os.path.join(server_config.servers_dir, 'another-server.json'))
assert os.path.isdir(os.path.join(server_config.working_dir, 'servers', 'another-server'))
@responses.activate
def test_add_server_already_configured(self, server_config):
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/discover.json',
json={"id": 'another-server',
"name": "Another server",
"git_url": "https://test2.repo.gigantum.com/",
"git_server_type": "gitlab",
"hub_api_url": "https://test2.gigantum.com/api/v1/",
"object_service_url": "https://test2.api.gigantum.com/object-v1/",
"user_search_url": "https://user-search2.us-east-1.cloudsearch.amazonaws.com",
"lfs_enabled": True,
"auth_config_url": "https://test2.gigantum.com/gigantum/.well-known/auth.json"},
status=200)
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/auth.json',
json={"audience": "test2.api.gigantum.io",
"issuer": "https://test2-auth.gigantum.com",
"signing_algorithm": "RS256",
"public_key_url": "https://test2-auth.gigantum.com/.well-known/jwks.json",
"login_url": "https://test2.gigantum.com/client/login",
"login_type": "auth0",
"auth0_client_id": "0000000000000000"},
status=200)
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/discover.json',
json={"id": 'another-server',
"name": "Another server",
"git_url": "https://test2.repo.gigantum.com/",
"git_server_type": "gitlab",
"hub_api_url": "https://test2.gigantum.com/api/v1/",
"object_service_url": "https://test2.api.gigantum.com/object-v1/",
"user_search_url": "https://user-search2.us-east-1.cloudsearch.amazonaws.com",
"lfs_enabled": True,
"auth_config_url": "https://test2.gigantum.com/gigantum/.well-known/auth.json"},
status=200)
server_id = server_config.add_server("https://test2.gigantum.com/")
assert server_id == 'another-server'
assert os.path.isfile(os.path.join(server_config.servers_dir, 'another-server.json'))
assert os.path.isdir(os.path.join(server_config.working_dir, 'servers', 'another-server'))
with pytest.raises(ValueError):
server_config.add_server("https://test2.gigantum.com/")
@responses.activate
def test_list_servers(self, server_config):
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/discover.json',
json={"id": 'another-server',
"name": "Another server",
"git_url": "https://test2.repo.gigantum.com/",
"git_server_type": "gitlab",
"hub_api_url": "https://test2.gigantum.com/api/v1/",
"object_service_url": "https://test2.api.gigantum.com/object-v1/",
"user_search_url": "https://user-search2.us-east-1.cloudsearch.amazonaws.com",
"lfs_enabled": True,
"auth_config_url": "https://test2.gigantum.com/gigantum/.well-known/auth.json"},
status=200)
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/auth.json',
json={"audience": "test2.api.gigantum.io",
"issuer": "https://test2-auth.gigantum.com",
"signing_algorithm": "RS256",
"public_key_url": "https://test2-auth.gigantum.com/.well-known/jwks.json",
"login_url": "https://test2.gigantum.com/client/login",
"login_type": "auth0",
"auth0_client_id": "0000000000000000"},
status=200)
responses.add(responses.GET, 'https://test3.gigantum.com/gigantum/.well-known/discover.json',
json={"id": 'my-server',
"name": "My Server 1",
"git_url": "https://test3.repo.gigantum.com/",
"git_server_type": "gitlab",
"hub_api_url": "https://test3.gigantum.com/api/v1/",
"object_service_url": "https://test3.api.gigantum.com/object-v1/",
"user_search_url": "https://user-search3.us-east-1.cloudsearch.amazonaws.com",
"lfs_enabled": True,
"auth_config_url": "https://test3.gigantum.com/gigantum/.well-known/auth.json"},
status=200)
responses.add(responses.GET, 'https://test3.gigantum.com/gigantum/.well-known/auth.json',
json={"audience": "test3.api.gigantum.io",
"issuer": "https://test3-auth.gigantum.com",
"signing_algorithm": "RS256",
"public_key_url": "https://test3-auth.gigantum.com/.well-known/jwks.json",
"login_url": "https://test3.gigantum.com/client/login",
"login_type": "auth0",
"auth0_client_id": "0000000000000000"},
status=200)
server_id = server_config.add_server("https://test2.gigantum.com/")
assert server_id == 'another-server'
assert os.path.isfile(os.path.join(server_config.servers_dir, 'another-server.json'))
assert os.path.isdir(os.path.join(server_config.working_dir, 'servers', 'another-server'))
server_id = server_config.add_server("https://test3.gigantum.com/")
assert server_id == 'my-server'
assert os.path.isfile(os.path.join(server_config.servers_dir, 'my-server.json'))
assert os.path.isdir(os.path.join(server_config.working_dir, 'servers', 'my-server'))
server_list = server_config.list_servers(should_print=True)
assert len(server_list) == 2
@responses.activate
def test_remove_server_only_one(self, server_config):
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/discover.json',
json={"id": 'another-server',
"name": "Another server",
"git_url": "https://test2.repo.gigantum.com/",
"git_server_type": "gitlab",
"hub_api_url": "https://test2.gigantum.com/api/v1/",
"object_service_url": "https://test2.api.gigantum.com/object-v1/",
"user_search_url": "https://user-search2.us-east-1.cloudsearch.amazonaws.com",
"lfs_enabled": True,
"auth_config_url": "https://test2.gigantum.com/gigantum/.well-known/auth.json"},
status=200)
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/auth.json',
json={"audience": "test2.api.gigantum.io",
"issuer": "https://test2-auth.gigantum.com",
"signing_algorithm": "RS256",
"public_key_url": "https://test2-auth.gigantum.com/.well-known/jwks.json",
"login_url": "https://test2.gigantum.com/client/login",
"login_type": "auth0",
"auth0_client_id": "0000000000000000"},
status=200)
server_id = server_config.add_server("https://test2.gigantum.com/")
os.makedirs(os.path.join(server_config.working_dir, '.labmanager', 'servers'), exist_ok=True)
with open(os.path.join(server_config.working_dir, '.labmanager', 'servers', 'CURRENT'), 'wt') as cf:
cf.write("another-server")
assert server_id == 'another-server'
assert os.path.isfile(os.path.join(server_config.servers_dir, 'another-server.json'))
assert os.path.isdir(os.path.join(server_config.working_dir, 'servers', 'another-server'))
with pytest.raises(ValueError):
server_config.remove_server('another-server')
@responses.activate
def test_remove_server(self, server_config):
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/discover.json',
json={"id": 'another-server',
"name": "Another server",
"git_url": "https://test2.repo.gigantum.com/",
"git_server_type": "gitlab",
"hub_api_url": "https://test2.gigantum.com/api/v1/",
"object_service_url": "https://test2.api.gigantum.com/object-v1/",
"user_search_url": "https://user-search2.us-east-1.cloudsearch.amazonaws.com",
"lfs_enabled": True,
"auth_config_url": "https://test2.gigantum.com/gigantum/.well-known/auth.json"},
status=200)
responses.add(responses.GET, 'https://test2.gigantum.com/gigantum/.well-known/auth.json',
json={"audience": "test2.api.gigantum.io",
"issuer": "https://test2-auth.gigantum.com",
"signing_algorithm": "RS256",
"public_key_url": "https://test2-auth.gigantum.com/.well-known/jwks.json",
"login_url": "https://test2.gigantum.com/client/login",
"login_type": "auth0",
"auth0_client_id": "0000000000000000"},
status=200)
responses.add(responses.GET, 'https://test3.gigantum.com/gigantum/.well-known/discover.json',
json={"id": 'my-server',
"name": "My Server 1",
"git_url": "https://test3.repo.gigantum.com/",
"git_server_type": "gitlab",
"hub_api_url": "https://test3.gigantum.com/api/v1/",
"object_service_url": "https://test3.api.gigantum.com/object-v1/",
"user_search_url": "https://user-search3.us-east-1.cloudsearch.amazonaws.com",
"lfs_enabled": True,
"auth_config_url": "https://test3.gigantum.com/gigantum/.well-known/auth.json"},
status=200)
responses.add(responses.GET, 'https://test3.gigantum.com/gigantum/.well-known/auth.json',
json={"audience": "test3.api.gigantum.io",
"issuer": "https://test3-auth.gigantum.com",
"signing_algorithm": "RS256",
"public_key_url": "https://test3-auth.gigantum.com/.well-known/jwks.json",
"login_url": "https://test3.gigantum.com/client/login",
"login_type": "auth0",
"auth0_client_id": "0000000000000000"},
status=200)
server_id = server_config.add_server("https://test2.gigantum.com/")
assert server_id == 'another-server'
server_id = server_config.add_server("https://test3.gigantum.com/")
assert server_id == 'my-server'
# mock some more stuff
server_file = os.path.join(server_config.servers_dir, "another-server.json")
with open(os.path.join(server_config.servers_dir, 'CURRENT'), 'wt') as cf:
cf.write("another-server")
cached_jwks = os.path.join(server_config.working_dir, '.labmanager', 'identity',
'another-server-jwks.json')
with open(cached_jwks, 'wt') as jf:
jf.write("FAKE DATA")
test_user_data = os.path.join(server_config.working_dir, 'servers', 'another-server',
'TEST_FILE')
with open(test_user_data, 'wt') as jf:
jf.write("FAKE DATA")
assert os.path.isfile(test_user_data)
assert os.path.isfile(cached_jwks)
assert os.path.isfile(server_file)
assert os.path.isdir(os.path.join(server_config.working_dir, 'servers', 'another-server'))
server_config.remove_server('another-server')
assert not os.path.isfile(test_user_data)
assert not os.path.isfile(cached_jwks)
assert not os.path.isfile(server_file)
assert not os.path.isdir(os.path.join(server_config.working_dir, 'servers', 'another-server'))
current_path = os.path.join(server_config.servers_dir, 'CURRENT')
with open(current_path, 'rt') as cf:
assert cf.read() == 'my-server'
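Every test follows the same recipe: stub the server's two well-known endpoints with the responses library, then exercise ServerConfig against them. A condensed sketch of that recipe with a hypothetical helper name and payloads trimmed to the fields the pattern needs (the real discovery and auth documents carry more keys, as the tests show):

import responses

def mock_gigantum_server(base_url, server_id):
    # hypothetical helper mirroring the stubs used in the tests above
    responses.add(responses.GET, base_url + '/gigantum/.well-known/discover.json',
                  json={"id": server_id,
                        "auth_config_url": base_url + '/gigantum/.well-known/auth.json'},
                  status=200)
    responses.add(responses.GET, base_url + '/gigantum/.well-known/auth.json',
                  json={"login_type": "auth0"}, status=200)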
| 57.158576
| 108
| 0.544785
| 1,865
| 17,662
| 4.996783
| 0.075067
| 0.103874
| 0.073828
| 0.094645
| 0.917588
| 0.908037
| 0.875952
| 0.856422
| 0.831849
| 0.821333
| 0
| 0.02817
| 0.31463
| 17,662
| 308
| 109
| 57.344156
| 0.741677
| 0.006624
| 0
| 0.808271
| 0
| 0
| 0.395005
| 0.00975
| 0
| 0
| 0
| 0
| 0.101504
| 1
| 0.030075
| false
| 0
| 0.030075
| 0
| 0.06391
| 0.003759
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b5c7b0afddd3f28aef9ea4c46835a902bfcb0d09
| 68,246
|
py
|
Python
|
python_msx_sdk/api/devices_api.py
|
CiscoDevNet/python-msx-sdk
|
d7e0a08c656504b4f4551d263e67c671a2a04b3f
|
[
"MIT"
] | null | null | null |
python_msx_sdk/api/devices_api.py
|
CiscoDevNet/python-msx-sdk
|
d7e0a08c656504b4f4551d263e67c671a2a04b3f
|
[
"MIT"
] | null | null | null |
python_msx_sdk/api/devices_api.py
|
CiscoDevNet/python-msx-sdk
|
d7e0a08c656504b4f4551d263e67c671a2a04b3f
|
[
"MIT"
] | null | null | null |
"""
MSX SDK
MSX SDK client. # noqa: E501
The version of the OpenAPI document: 1.0.9
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from python_msx_sdk.api_client import ApiClient, Endpoint as _Endpoint
from python_msx_sdk.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from python_msx_sdk.model.device import Device
from python_msx_sdk.model.device_compliance_state import DeviceComplianceState
from python_msx_sdk.model.device_create import DeviceCreate
from python_msx_sdk.model.device_patch import DevicePatch
from python_msx_sdk.model.device_template_attach_request import DeviceTemplateAttachRequest
from python_msx_sdk.model.device_template_batch_attach_request import DeviceTemplateBatchAttachRequest
from python_msx_sdk.model.device_template_batch_attach_response import DeviceTemplateBatchAttachResponse
from python_msx_sdk.model.device_template_history import DeviceTemplateHistory
from python_msx_sdk.model.device_template_update_request import DeviceTemplateUpdateRequest
from python_msx_sdk.model.device_update import DeviceUpdate
from python_msx_sdk.model.device_vulnerability_state import DeviceVulnerabilityState
from python_msx_sdk.model.devices_page import DevicesPage
from python_msx_sdk.model.error import Error
from python_msx_sdk.model.manage_change_request_pending import ManageChangeRequestPending
class DevicesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __attach_device_templates(
self,
id,
device_template_attach_request,
**kwargs
):
"""Attaches one or more device templates to a device instance. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.attach_device_templates(id, device_template_attach_request, async_req=True)
>>> result = thread.get()
Args:
id (str):
device_template_attach_request (DeviceTemplateAttachRequest):
Keyword Args:
_return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[DeviceTemplateHistory]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['device_template_attach_request'] = \
device_template_attach_request
return self.call_with_http_info(**kwargs)
self.attach_device_templates = _Endpoint(
settings={
'response_type': ([DeviceTemplateHistory],),
'auth': [],
'endpoint_path': '/manage/api/v8/devices/{id}/templates',
'operation_id': 'attach_device_templates',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'id',
'device_template_attach_request',
],
'required': [
'id',
'device_template_attach_request',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'device_template_attach_request':
(DeviceTemplateAttachRequest,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
'device_template_attach_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__attach_device_templates
)
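# Usage sketch (an illustration, not part of the generated file): each
# public attribute wraps its private callable in an _Endpoint, so a
# caller would do roughly
#     api = DevicesApi()
#     history = api.attach_device_templates(device_id, attach_request)
# where device_id and attach_request are hypothetical values.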
def __batch_attach_device_templates(
self,
device_template_batch_attach_request,
**kwargs
):
"""Attaches one or more device templates to a batch of device instances. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.batch_attach_device_templates(device_template_batch_attach_request, async_req=True)
>>> result = thread.get()
Args:
device_template_batch_attach_request (DeviceTemplateBatchAttachRequest):
Keyword Args:
_return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[DeviceTemplateBatchAttachResponse]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['device_template_batch_attach_request'] = \
device_template_batch_attach_request
return self.call_with_http_info(**kwargs)
self.batch_attach_device_templates = _Endpoint(
settings={
'response_type': ([DeviceTemplateBatchAttachResponse],),
'auth': [],
'endpoint_path': '/manage/api/v8/devices/templates/attach',
'operation_id': 'batch_attach_device_templates',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'device_template_batch_attach_request',
],
'required': [
'device_template_batch_attach_request',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'device_template_batch_attach_request':
(DeviceTemplateBatchAttachRequest,),
},
'attribute_map': {
},
'location_map': {
'device_template_batch_attach_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__batch_attach_device_templates
)
def __create_device(
self,
device_create,
**kwargs
):
"""Creates a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_device(device_create, async_req=True)
>>> result = thread.get()
Args:
device_create (DeviceCreate):
Keyword Args:
_return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Device
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['device_create'] = \
device_create
return self.call_with_http_info(**kwargs)
self.create_device = _Endpoint(
settings={
'response_type': (Device,),
'auth': [],
'endpoint_path': '/manage/api/v8/devices',
'operation_id': 'create_device',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'device_create',
],
'required': [
'device_create',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'device_create':
(DeviceCreate,),
},
'attribute_map': {
},
'location_map': {
'device_create': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__create_device
)
def __delete_device(
self,
id,
**kwargs
):
"""Deletes a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_device(id, async_req=True)
>>> result = thread.get()
Args:
id (str):
Keyword Args:
_return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.delete_device = _Endpoint(
settings={
'response_type': None,
'auth': [],
'endpoint_path': '/manage/api/v8/devices/{id}',
'operation_id': 'delete_device',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__delete_device
)
def __detach_device_template(
self,
id,
template_id,
**kwargs
):
"""Detaches a template from a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.detach_device_template(id, template_id, async_req=True)
>>> result = thread.get()
Args:
id (str):
template_id (str):
Keyword Args:
_return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[DeviceTemplateHistory]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['template_id'] = \
template_id
return self.call_with_http_info(**kwargs)
self.detach_device_template = _Endpoint(
settings={
'response_type': ([DeviceTemplateHistory],),
'auth': [],
'endpoint_path': '/manage/api/v8/devices/{id}/templates/{templateId}',
'operation_id': 'detach_device_template',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'id',
'template_id',
],
'required': [
'id',
'template_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'template_id':
(str,),
},
'attribute_map': {
'id': 'id',
'template_id': 'templateId',
},
'location_map': {
'id': 'path',
'template_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__detach_device_template
)
def __detach_device_templates(
self,
id,
**kwargs
):
"""Detach device templates that are already attached to a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.detach_device_templates(id, async_req=True)
>>> result = thread.get()
Args:
id (str):
Keyword Args:
_return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[DeviceTemplateHistory]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.detach_device_templates = _Endpoint(
settings={
'response_type': ([DeviceTemplateHistory],),
'auth': [],
'endpoint_path': '/manage/api/v8/devices/{id}/templates',
'operation_id': 'detach_device_templates',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__detach_device_templates
)
def __get_device(
self,
id,
**kwargs
):
"""Returns a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device(id, async_req=True)
>>> result = thread.get()
Args:
id (str):
Keyword Args:
_return_http_data_only (bool): response data without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Device
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.get_device = _Endpoint(
settings={
'response_type': (Device,),
'auth': [],
'endpoint_path': '/manage/api/v8/devices/{id}',
'operation_id': 'get_device',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_device
)
def __get_device_config(
self,
id,
**kwargs
):
"""Returns the running configuration for a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_config(id, async_req=True)
>>> result = thread.get()
Args:
id (str):
Keyword Args:
_return_http_data_only (bool): response data without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
str
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.get_device_config = _Endpoint(
settings={
'response_type': (str,),
'auth': [],
'endpoint_path': '/manage/api/v8/devices/{id}/config',
'operation_id': 'get_device_config',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'text/plain',
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_device_config
)
def __get_device_template_history(
self,
id,
**kwargs
):
"""Returns device template history. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_device_template_history(id, async_req=True)
>>> result = thread.get()
Args:
id (str):
Keyword Args:
template_id (str): [optional]
_return_http_data_only (bool): response data without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[DeviceTemplateHistory]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.get_device_template_history = _Endpoint(
settings={
'response_type': ([DeviceTemplateHistory],),
'auth': [],
'endpoint_path': '/manage/api/v8/devices/{id}/templates',
'operation_id': 'get_device_template_history',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'id',
'template_id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'template_id':
(str,),
},
'attribute_map': {
'id': 'id',
'template_id': 'templateId',
},
'location_map': {
'id': 'path',
'template_id': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_device_template_history
)
def __get_devices_page(
self,
page,
page_size,
**kwargs
):
"""Returns a page of devices. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_devices_page(page, page_size, async_req=True)
>>> result = thread.get()
Args:
page (int):
page_size (int):
Keyword Args:
device_ids ([str]): [optional]
service_ids ([str]): [optional]
types ([str]): [optional]
serial_keys ([str]): [optional]
service_types ([str]): [optional]
models ([str]): [optional]
subtypes ([str]): [optional]
names ([str]): [optional]
versions ([str]): [optional]
tenant_ids ([str]): [optional]
include_subtenants (bool): [optional] if omitted the server will use the default value of False
severities ([str]): [optional]
compliance_states ([DeviceComplianceState]): [optional]
vulnerability_states ([DeviceVulnerabilityState]): [optional]
sort_by (str): [optional]
sort_order (str): [optional] if omitted the server will use the default value of "asc"
_return_http_data_only (bool): response data without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
DevicesPage
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['page'] = \
page
kwargs['page_size'] = \
page_size
return self.call_with_http_info(**kwargs)
self.get_devices_page = _Endpoint(
settings={
'response_type': (DevicesPage,),
'auth': [],
'endpoint_path': '/manage/api/v8/devices',
'operation_id': 'get_devices_page',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'page',
'page_size',
'device_ids',
'service_ids',
'types',
'serial_keys',
'service_types',
'models',
'subtypes',
'names',
'versions',
'tenant_ids',
'include_subtenants',
'severities',
'compliance_states',
'vulnerability_states',
'sort_by',
'sort_order',
],
'required': [
'page',
'page_size',
],
'nullable': [
],
'enum': [
'sort_order',
],
'validation': [
'page',
'page_size',
]
},
root_map={
'validations': {
('page',): {
'inclusive_minimum': 0,
},
('page_size',): {
'inclusive_maximum': 1000,
'inclusive_minimum': 1,
},
},
'allowed_values': {
('sort_order',): {
"ASC": "asc",
"DESC": "desc"
},
},
'openapi_types': {
'page':
(int,),
'page_size':
(int,),
'device_ids':
([str],),
'service_ids':
([str],),
'types':
([str],),
'serial_keys':
([str],),
'service_types':
([str],),
'models':
([str],),
'subtypes':
([str],),
'names':
([str],),
'versions':
([str],),
'tenant_ids':
([str],),
'include_subtenants':
(bool,),
'severities':
([str],),
'compliance_states':
([DeviceComplianceState],),
'vulnerability_states':
([DeviceVulnerabilityState],),
'sort_by':
(str,),
'sort_order':
(str,),
},
'attribute_map': {
'page': 'page',
'page_size': 'pageSize',
'device_ids': 'deviceIds',
'service_ids': 'serviceIds',
'types': 'types',
'serial_keys': 'serialKeys',
'service_types': 'serviceTypes',
'models': 'models',
'subtypes': 'subtypes',
'names': 'names',
'versions': 'versions',
'tenant_ids': 'tenantIds',
'include_subtenants': 'includeSubtenants',
'severities': 'severities',
'compliance_states': 'complianceStates',
'vulnerability_states': 'vulnerabilityStates',
'sort_by': 'sortBy',
'sort_order': 'sortOrder',
},
'location_map': {
'page': 'query',
'page_size': 'query',
'device_ids': 'query',
'service_ids': 'query',
'types': 'query',
'serial_keys': 'query',
'service_types': 'query',
'models': 'query',
'subtypes': 'query',
'names': 'query',
'versions': 'query',
'tenant_ids': 'query',
'include_subtenants': 'query',
'severities': 'query',
'compliance_states': 'query',
'vulnerability_states': 'query',
'sort_by': 'query',
'sort_order': 'query',
},
'collection_format_map': {
'device_ids': 'multi',
'service_ids': 'multi',
'types': 'multi',
'serial_keys': 'multi',
'service_types': 'multi',
'models': 'multi',
'subtypes': 'multi',
'names': 'multi',
'versions': 'multi',
'tenant_ids': 'multi',
'severities': 'multi',
'compliance_states': 'multi',
'vulnerability_states': 'multi',
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_devices_page
)
def __patch_device(
self,
id,
device_patch,
**kwargs
):
"""Update a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_device(id, device_patch, async_req=True)
>>> result = thread.get()
Args:
id (str):
device_patch (DevicePatch):
Keyword Args:
_return_http_data_only (bool): response data without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Device
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['device_patch'] = \
device_patch
return self.call_with_http_info(**kwargs)
self.patch_device = _Endpoint(
settings={
'response_type': (Device,),
'auth': [],
'endpoint_path': '/manage/api/v8/devices/{id}',
'operation_id': 'patch_device',
'http_method': 'PATCH',
'servers': None,
},
params_map={
'all': [
'id',
'device_patch',
],
'required': [
'id',
'device_patch',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'device_patch':
(DevicePatch,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
'device_patch': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__patch_device
)
def __redeploy_device(
self,
id,
**kwargs
):
"""Dedeploys a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.redeploy_device(id, async_req=True)
>>> result = thread.get()
Args:
id (str):
Keyword Args:
_return_http_data_only (bool): response data without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
return self.call_with_http_info(**kwargs)
self.redeploy_device = _Endpoint(
settings={
'response_type': None,
'auth': [],
'endpoint_path': '/manage/api/v8/devices/{id}/redeploy',
'operation_id': 'redeploy_device',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'id',
],
'required': [
'id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__redeploy_device
)
def __update_device(
self,
id,
device_update,
**kwargs
):
"""Update a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_device(id, device_update, async_req=True)
>>> result = thread.get()
Args:
id (str):
device_update (DeviceUpdate):
Keyword Args:
_return_http_data_only (bool): response data without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
Device
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['device_update'] = \
device_update
return self.call_with_http_info(**kwargs)
self.update_device = _Endpoint(
settings={
'response_type': (Device,),
'auth': [],
'endpoint_path': '/manage/api/v8/devices/{id}',
'operation_id': 'update_device',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'id',
'device_update',
],
'required': [
'id',
'device_update',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'device_update':
(DeviceUpdate,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
'device_update': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__update_device
)
def __update_device_templates(
self,
id,
device_template_update_request,
**kwargs
):
"""Update device templates that are already attached to a device. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_device_templates(id, device_template_update_request, async_req=True)
>>> result = thread.get()
Args:
id (str):
device_template_update_request (DeviceTemplateUpdateRequest):
Keyword Args:
_return_http_data_only (bool): response data without the HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
[DeviceTemplateHistory]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['id'] = \
id
kwargs['device_template_update_request'] = \
device_template_update_request
return self.call_with_http_info(**kwargs)
self.update_device_templates = _Endpoint(
settings={
'response_type': ([DeviceTemplateHistory],),
'auth': [],
'endpoint_path': '/manage/api/v8/devices/{id}/templates',
'operation_id': 'update_device_templates',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'id',
'device_template_update_request',
],
'required': [
'id',
'device_template_update_request',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'id':
(str,),
'device_template_update_request':
(DeviceTemplateUpdateRequest,),
},
'attribute_map': {
'id': 'id',
},
'location_map': {
'id': 'path',
'device_template_update_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client,
callable=__update_device_templates
)
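A minimal usage sketch for the generated endpoints above. The package name openapi_client, the module devices_api, and the wrapper class DevicesApi are assumptions based on the usual openapi-generator layout, not confirmed by this file:

# Hypothetical usage; package/module/class names are assumed, and the host,
# device id, and template id are placeholders.
from pprint import pprint

import openapi_client  # assumed package name of this generated client
from openapi_client.api import devices_api  # assumed module name

configuration = openapi_client.Configuration(host="https://example.com")
with openapi_client.ApiClient(configuration) as client:
    api = devices_api.DevicesApi(client)

    # Synchronous call: async_req defaults to False, so this blocks.
    device = api.get_device("device-123")
    pprint(device)

    # Asynchronous call: returns a thread-like object; .get() blocks for the result.
    thread = api.detach_device_template("device-123", "template-456", async_req=True)
    history = thread.get()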
bd46d02618937c9b7d92a212999984111c42ad9b | 3,927 | py | Python | code/nn-models/utils.py | xigaoli/anime-ranking-trends | 395bad92d78230c661cb718c0e83062aa7f7a974 | ["Apache-2.0"] | null | null | null | code/nn-models/utils.py | xigaoli/anime-ranking-trends | 395bad92d78230c661cb718c0e83062aa7f7a974 | ["Apache-2.0"] | null | null | null | code/nn-models/utils.py | xigaoli/anime-ranking-trends | 395bad92d78230c661cb718c0e83062aa7f7a974 | ["Apache-2.0"] | 1 | 2021-07-01T17:39:52.000Z | 2021-07-01T17:39:52.000Z |
#!/usr/bin/env python
# coding: utf-8
import matplotlib.pyplot as plt
import math
plt.rcParams['figure.dpi'] = 150
def show_imgs(data,real_labels=None,pred_labels=None,classes_rev=[]):
plt.figure(figsize=(10,10))
rows=int(math.sqrt(len(data)))
cols=len(data)//rows
if(len(data)%rows!=0):
cols+=1
print("rows={},cols={}".format(rows,cols))
w=rows
h=cols
fig, axs = plt.subplots(w,h)
if(w==1):
axs=[axs]
if(h==1):
axs=[axs]
for i in range(w):
for j in range(h):
label_text = "--"
if(i*h+j<len(data)):#within bound
img=data[i*h+j]
axs[i][j].imshow(img)
if(real_labels is not None): #if label not given, just load the img
label_num=real_labels[i*h+j]
label_text = classes_rev[label_num]
            else:  # ran out of images for this cell: hide the axis and skip it
                axs[i][j].set_axis_off()
                continue
if(pred_labels is not None): #if pred label given, append pred label
pred_label_num=pred_labels[i*h+j]
pred_label_text = classes_rev[pred_label_num]
label_text+="\n{}".format(pred_label_text)
axs[i][j].set_title(label_text,fontsize=5)
axs[i][j].set_axis_off()
if(pred_labels is not None):
plt.subplots_adjust(wspace=0,hspace=0.7)
else:
plt.subplots_adjust(wspace=0,hspace=0.4)
plt.show()
#display multiple labels for an image
#dispMax is how many labels to display, avoid flow out of box
def show_imgs_multi_label(data,real_labels=None,pred_labels=None,classes_rev=[],dispMax=3):
plt.figure(figsize=(10,10))
rows=int(math.sqrt(len(data)))
cols=len(data)//rows
if(len(data)%rows!=0):
cols+=1
print("rows={},cols={}".format(rows,cols))
w=rows
h=cols
fig, axs = plt.subplots(w,h)
if(w==1):
axs=[axs]
if(h==1):
axs=[axs]
for i in range(w):
for j in range(h):
label_text = ""
if(i*h+j<len(data)):#within bound
img=data[i*h+j]
axs[i][j].imshow(img)
if(real_labels is not None): #if label not given, just load the img
label_num_list=real_labels[i*h+j]
counter=0
for idx,t in enumerate(label_num_list):
if(t==1):
if(len(label_text)!=0):#do not linebreak first
label_text += ("/")
label_text += classes_rev[idx]
counter+=1
if(counter>=dispMax):
break
            else:  # ran out of images for this cell: hide the axis and skip it
                axs[i][j].set_axis_off()
                continue
if(pred_labels is not None): #if pred label given, append pred label
pred_label_text=""
pred_label_num_list=pred_labels[i*h+j]
counter=0
for idx,t in enumerate(pred_label_num_list):
if(t==True):
if(len(label_text)!=0):#do not linebreak first
label_text += ("\n")
pred_label_text += "/" + classes_rev[idx]
counter+=1
if(counter>=dispMax):
break
label_text+="\n pred:{}".format(pred_label_text)
axs[i][j].set_title(label_text,fontsize=5)
axs[i][j].set_axis_off()
if(pred_labels is not None):
plt.subplots_adjust(wspace=0,hspace=0.7)
else:
plt.subplots_adjust(wspace=-0.5,hspace=1)
plt.show()
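A minimal, hypothetical call of the two helpers above, with random arrays standing in for images and invented class names, just to illustrate the expected argument shapes:

import numpy as np

imgs = [np.random.rand(32, 32, 3) for _ in range(6)]  # six random RGB "images"
classes = ["cat", "dog", "bird"]  # classes_rev maps label index -> name

# Single-label grid: one integer label per image.
show_imgs(imgs, real_labels=[0, 1, 2, 0, 1, 2], classes_rev=classes)

# Multi-label grid: one multi-hot row per image instead of a single index.
show_imgs_multi_label(imgs, real_labels=[[1, 0, 1], [0, 1, 0]] * 3,
                      classes_rev=classes, dispMax=2)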
1fb76a6e9ea6b41c3f14b38dfeebb92771f11f63 | 241 | py | Python | sqltask/utils/performance.py | villebro/sqltask | 41b67cab1a3e804b2c604571fa455d2b9e85a004 | ["MIT"] | 10 | 2019-10-09T15:34:13.000Z | 2022-02-21T07:44:03.000Z | sqltask/utils/performance.py | villebro/sqltask | 41b67cab1a3e804b2c604571fa455d2b9e85a004 | ["MIT"] | 23 | 2019-10-09T15:20:01.000Z | 2020-02-08T11:51:24.000Z | sqltask/utils/performance.py | villebro/sqltask | 41b67cab1a3e804b2c604571fa455d2b9e85a004 | ["MIT"] | 4 | 2019-10-09T15:20:51.000Z | 2020-02-11T08:43:03.000Z |
import os
def is_developer_mode() -> bool:
"""
Check if developer mode is activated.
:return: True if developer mode is active, otherwise False
"""
    return os.getenv("SQLTASK_DEVELOPER_MODE") is not None
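For example, setting the variable to any value (the "1" below is arbitrary) switches developer mode on; presence, not the value, is what matters:

import os

os.environ["SQLTASK_DEVELOPER_MODE"] = "1"
assert is_developer_mode()

del os.environ["SQLTASK_DEVELOPER_MODE"]
assert not is_developer_mode()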
1fb77536bc75d5c2b6a7f3236809207426b1db1a | 44 | py | Python | code/sample_4-3-10.py | KoyanagiHitoshi/AtCoder-Python-Introduction | 6d014e333a873f545b4d32d438e57cf428b10b96 | ["MIT"] | 1 | 2022-03-29T13:50:12.000Z | 2022-03-29T13:50:12.000Z | code/sample_4-3-10.py | KoyanagiHitoshi/AtCoder-Python-Introduction | 6d014e333a873f545b4d32d438e57cf428b10b96 | ["MIT"] | null | null | null | code/sample_4-3-10.py | KoyanagiHitoshi/AtCoder-Python-Introduction | 6d014e333a873f545b4d32d438e57cf428b10b96 | ["MIT"] | null | null | null |
x = [1, 2, 3, 4, 5]
print(x.pop())
print(x)
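The snippet above prints 5 and then [1, 2, 3, 4], since pop() with no argument removes and returns the last element. For contrast, pop(i) with an explicit index works on position i:

y = [1, 2, 3, 4, 5]
print(y.pop(0))  # 1 -- removes and returns the first element
print(y)         # [2, 3, 4, 5]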
9508b3617086b46d8e5cdfd535f55ee92c1a7e7f | 10,351 | py | Python | saleor/plugins/admin_email/tests/test_tasks.py | greentornado/saleor | 7f58917957a23c4dd90b47214a4500c91c735dee | ["CC-BY-4.0"] | 3 | 2021-06-22T12:38:18.000Z | 2021-07-11T15:01:57.000Z | saleor/plugins/admin_email/tests/test_tasks.py | greentornado/saleor | 7f58917957a23c4dd90b47214a4500c91c735dee | ["CC-BY-4.0"] | 111 | 2021-07-19T04:19:30.000Z | 2022-03-28T04:32:37.000Z | saleor/plugins/admin_email/tests/test_tasks.py | aminziadna/saleor | 2e78fb5bcf8b83a6278af02551a104cfa555a1fb | ["CC-BY-4.0"] | 6 | 2021-11-08T16:43:05.000Z | 2022-03-22T17:31:16.000Z |
from unittest import mock
from ....account.notifications import get_default_user_payload
from ....csv import ExportEvents
from ....csv.models import ExportEvent
from ....csv.notifications import get_default_export_payload
from ....order.notifications import get_default_order_payload
from ...email_common import EmailConfig
from ..tasks import (
send_email_with_link_to_download_file_task,
send_export_failed_email_task,
send_set_staff_password_email_task,
send_staff_order_confirmation_email_task,
send_staff_password_reset_email_task,
)
@mock.patch("saleor.plugins.email_common.send_mail")
def test_send_staff_password_reset_email_task_default_template(
mocked_send_mail, email_dict_config, customer_user
):
token = "token123"
recipient_email = "admin@example.com"
payload = {
"user": get_default_user_payload(customer_user),
"recipient_email": recipient_email,
"token": token,
"reset_url": f"http://localhost:8000/redirect{token}",
"domain": "localhost:8000",
"site_name": "Saleor",
}
send_staff_password_reset_email_task(recipient_email, payload, email_dict_config)
# confirm that mail has correct structure and email was sent
assert mocked_send_mail.called
@mock.patch("saleor.plugins.admin_email.tasks.send_email")
def test_send_staff_password_reset_email_task_custom_template(
mocked_send_email, email_dict_config, admin_email_plugin, customer_user
):
expected_template_str = "<html><body>Template body</body></html>"
expected_subject = "Test Email Subject"
admin_email_plugin(
staff_password_reset_template=expected_template_str,
staff_password_reset_subject=expected_subject,
)
token = "token123"
recipient_email = "admin@example.com"
payload = {
"user": get_default_user_payload(customer_user),
"recipient_email": recipient_email,
"token": token,
"reset_url": f"http://localhost:8000/redirect{token}",
"domain": "localhost:8000",
"site_name": "Saleor",
}
send_staff_password_reset_email_task(recipient_email, payload, email_dict_config)
email_config = EmailConfig(**email_dict_config)
mocked_send_email.assert_called_with(
config=email_config,
recipient_list=[recipient_email],
context=payload,
subject=expected_subject,
template_str=expected_template_str,
)
@mock.patch("saleor.plugins.email_common.send_mail")
def test_send_set_staff_password_email_task_default_template(
mocked_send_mail, email_dict_config, customer_user
):
recipient_email = "user@example.com"
token = "token123"
payload = {
"user": get_default_user_payload(customer_user),
"recipient_email": recipient_email,
"token": token,
"password_set_url": f"http://localhost:8000/redirect{token}",
"site_name": "Saleor",
"domain": "localhost:8000",
}
send_set_staff_password_email_task(recipient_email, payload, email_dict_config)
# confirm that mail has correct structure and email was sent
assert mocked_send_mail.called
@mock.patch("saleor.plugins.admin_email.tasks.send_email")
def test_send_set_staff_password_email_task_custom_template(
mocked_send_email, email_dict_config, admin_email_plugin, customer_user
):
expected_template_str = "<html><body>Template body</body></html>"
expected_subject = "Test Email Subject"
admin_email_plugin(
set_staff_password_template=expected_template_str,
set_staff_password_title=expected_subject,
)
recipient_email = "user@example.com"
token = "token123"
payload = {
"user": get_default_user_payload(customer_user),
"recipient_email": recipient_email,
"token": token,
"password_set_url": f"http://localhost:8000/redirect{token}",
"site_name": "Saleor",
"domain": "localhost:8000",
}
send_set_staff_password_email_task(recipient_email, payload, email_dict_config)
email_config = EmailConfig(**email_dict_config)
mocked_send_email.assert_called_with(
config=email_config,
recipient_list=[recipient_email],
context=payload,
subject=expected_subject,
template_str=expected_template_str,
)
@mock.patch("saleor.plugins.email_common.send_mail")
def test_send_email_with_link_to_download_file_task_default_template(
mocked_send_mail, email_dict_config, customer_user, user_export_file
):
recipient_email = "admin@example.com"
csv_url = "http://127.0.0.1:8000"
payload = {
"export": get_default_export_payload(user_export_file),
"csv_link": csv_url,
"recipient_email": user_export_file.user.email,
"site_name": "Saleor",
"domain": "localhost:8000",
}
send_email_with_link_to_download_file_task(
recipient_email, payload, email_dict_config
)
# confirm that mail has correct structure and email was sent
assert mocked_send_mail.called
assert ExportEvent.objects.filter(
export_file=user_export_file,
user=user_export_file.user,
type=ExportEvents.EXPORTED_FILE_SENT,
).exists()
@mock.patch("saleor.plugins.admin_email.tasks.send_email")
def test_send_email_with_link_to_download_file_task_custom_template(
mocked_send_email, email_dict_config, admin_email_plugin, user_export_file
):
expected_template_str = "<html><body>Template body</body></html>"
expected_subject = "Test Email Subject"
admin_email_plugin(
csv_product_export=expected_template_str,
csv_product_export_title=expected_subject,
)
recipient_email = "admin@example.com"
csv_url = "http://127.0.0.1:8000"
payload = {
"export": get_default_export_payload(user_export_file),
"csv_link": csv_url,
"recipient_email": user_export_file.user.email,
"site_name": "Saleor",
"domain": "localhost:8000",
}
send_email_with_link_to_download_file_task(
recipient_email, payload, email_dict_config
)
email_config = EmailConfig(**email_dict_config)
mocked_send_email.assert_called_with(
config=email_config,
recipient_list=[recipient_email],
context=payload,
subject=expected_subject,
template_str=expected_template_str,
)
assert ExportEvent.objects.filter(
export_file=user_export_file,
user=user_export_file.user,
type=ExportEvents.EXPORTED_FILE_SENT,
).exists()
@mock.patch("saleor.plugins.email_common.send_mail")
def test_send_export_failed_email_task_default_template(
mocked_send_mail, email_dict_config, user_export_file
):
recipient_email = "admin@example.com"
payload = {
"export": get_default_export_payload(user_export_file),
"recipient_email": recipient_email,
"site_name": "Saleor",
"domain": "localhost:8000",
}
send_export_failed_email_task(recipient_email, payload, email_dict_config)
# confirm that mail has correct structure and email was sent
assert mocked_send_mail.called
assert ExportEvent.objects.filter(
export_file=user_export_file,
user=user_export_file.user,
type=ExportEvents.EXPORT_FAILED_INFO_SENT,
)
@mock.patch("saleor.plugins.admin_email.tasks.send_email")
def test_send_export_failed_email_task_custom_template(
mocked_send_email, email_dict_config, admin_email_plugin, user_export_file
):
expected_template_str = "<html><body>Template body</body></html>"
expected_subject = "Test Email Subject"
admin_email_plugin(
csv_product_export_failed=expected_template_str,
csv_product_export_failed_title=expected_subject,
)
recipient_email = "admin@example.com"
payload = {
"export": get_default_export_payload(user_export_file),
"recipient_email": recipient_email,
"site_name": "Saleor",
"domain": "localhost:8000",
}
send_export_failed_email_task(recipient_email, payload, email_dict_config)
email_config = EmailConfig(**email_dict_config)
mocked_send_email.assert_called_with(
config=email_config,
recipient_list=[recipient_email],
context=payload,
subject=expected_subject,
template_str=expected_template_str,
)
assert ExportEvent.objects.filter(
export_file=user_export_file,
user=user_export_file.user,
type=ExportEvents.EXPORT_FAILED_INFO_SENT,
)
@mock.patch("saleor.plugins.email_common.send_mail")
def test_send_staff_order_confirmation_email_task_default_template(
mocked_send_mail, email_dict_config, order_with_lines
):
recipient_email = "user@example.com"
payload = {
"order": get_default_order_payload(
order_with_lines, "http://localhost:8000/redirect"
),
"recipient_list": [recipient_email],
"site_name": "Saleor",
"domain": "localhost:8000",
}
send_staff_order_confirmation_email_task(
[recipient_email], payload, email_dict_config
)
# confirm that mail has correct structure and email was sent
assert mocked_send_mail.called
@mock.patch("saleor.plugins.admin_email.tasks.send_email")
def test_send_staff_order_confirmation_email_task_custom_template(
mocked_send_email, order_with_lines, email_dict_config, admin_email_plugin
):
expected_template_str = "<html><body>Template body</body></html>"
expected_subject = "Test Email Subject"
admin_email_plugin(
staff_order_confirmation=expected_template_str,
staff_order_confirmation_title=expected_subject,
)
recipient_email = "user@example.com"
payload = {
"order": get_default_order_payload(
order_with_lines, "http://localhost:8000/redirect"
),
"recipient_list": [recipient_email],
"site_name": "Saleor",
"domain": "localhost:8000",
}
send_staff_order_confirmation_email_task(
[recipient_email], payload, email_dict_config
)
email_config = EmailConfig(**email_dict_config)
mocked_send_email.assert_called_with(
config=email_config,
recipient_list=[recipient_email],
context=payload,
subject=expected_subject,
template_str=expected_template_str,
)
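The pattern every test above relies on (patch the transport, run the task, assert on the mock) reduces to this standalone sketch; the send_welcome task and its signature are invented for illustration, not part of saleor:

from unittest import mock

def send_welcome(send_mail, address):
    # Toy stand-in for the tasks above: delegates delivery to an injected transport.
    send_mail(subject="Welcome", recipient_list=[address])

def test_send_welcome():
    transport = mock.Mock()
    send_welcome(transport, "user@example.com")
    transport.assert_called_once_with(
        subject="Welcome", recipient_list=["user@example.com"]
    )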
9514653df022a5207354befade0b8562494efba2 | 86 | py | Python | pyscf/prop/hfc/__init__.py | nmardirossian/pyscf | 57c8912dcfcc1157a822feede63df54ed1067115 | ["BSD-2-Clause"] | 1 | 2018-05-02T19:55:30.000Z | 2018-05-02T19:55:30.000Z | pyscf/prop/hfc/__init__.py | nmardirossian/pyscf | 57c8912dcfcc1157a822feede63df54ed1067115 | ["BSD-2-Clause"] | null | null | null | pyscf/prop/hfc/__init__.py | nmardirossian/pyscf | 57c8912dcfcc1157a822feede63df54ed1067115 | ["BSD-2-Clause"] | 1 | 2018-12-06T03:10:50.000Z | 2018-12-06T03:10:50.000Z |
#!/usr/bin/env python
from pyscf.prop.hfc import uhf
from pyscf.prop.hfc import uks
1f3926b56fea0c3ac0692b9b37c56169802dfb4f | 180 | py | Python | anvil/sub_rig_templates/tentacle.py | AndresMWeber/Anvil | 9cd202183ac998983c2bf6e55cc46bbc0ca1a78e | ["Apache-2.0"] | 3 | 2019-11-22T04:38:06.000Z | 2022-01-19T08:27:18.000Z | anvil/sub_rig_templates/tentacle.py | AndresMWeber/Anvil | 9cd202183ac998983c2bf6e55cc46bbc0ca1a78e | ["Apache-2.0"] | 28 | 2018-02-01T20:39:42.000Z | 2018-04-26T17:25:23.000Z | anvil/sub_rig_templates/tentacle.py | AndresMWeber/Anvil | 9cd202183ac998983c2bf6e55cc46bbc0ca1a78e | ["Apache-2.0"] | 1 | 2018-03-11T06:47:26.000Z | 2018-03-11T06:47:26.000Z |
from base_sub_rig_template import SubRigTemplate
class Tentacle(SubRigTemplate):
BUILT_IN_META_DATA = SubRigTemplate.BUILT_IN_META_DATA.merge({'name': 'tentacle'}, new=True)
1f43f0d348adb2ed3e0f97b57645a707d6e22a14 | 178 | py | Python | flash/text/seq2seq/core/__init__.py | alvin-chang/lightning-flash | 481d4d369ff0a5d8c2b2d9e4970c5608a92b3ff5 | ["Apache-2.0"] | 2 | 2021-06-25T08:42:36.000Z | 2021-06-25T08:49:29.000Z | flash/text/seq2seq/core/__init__.py | alvin-chang/lightning-flash | 481d4d369ff0a5d8c2b2d9e4970c5608a92b3ff5 | ["Apache-2.0"] | null | null | null | flash/text/seq2seq/core/__init__.py | alvin-chang/lightning-flash | 481d4d369ff0a5d8c2b2d9e4970c5608a92b3ff5 | ["Apache-2.0"] | null | null | null |
from flash.text.seq2seq.core.data import Seq2SeqData
from flash.text.seq2seq.core.finetuning import Seq2SeqFreezeEmbeddings
from flash.text.seq2seq.core.model import Seq2SeqTask
1f78ef0b1ef277eb84f2732ce144bde61a64777e | 179 | py | Python | test/sudo_test.py | gg-lc/RLBench | 7ce7487633dac1d671ea939694faf130304e2cd3 | ["MIT"] | null | null | null | test/sudo_test.py | gg-lc/RLBench | 7ce7487633dac1d671ea939694faf130304e2cd3 | ["MIT"] | null | null | null | test/sudo_test.py | gg-lc/RLBench | 7ce7487633dac1d671ea939694faf130304e2cd3 | ["MIT"] | null | null | null |
import shlex
import subprocess
p = subprocess.Popen(shlex.split('sudo echo 1'), stdout=subprocess.PIPE)
p = subprocess.Popen(shlex.split('sudo echo 1'), stdout=subprocess.PIPE)
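To actually read what the second Popen call captures, the pipe has to be drained; a minimal continuation (assuming passwordless sudo, otherwise the call blocks on a password prompt):

out, _ = p.communicate()
print(out.decode().strip())  # expected output: 1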
2f10554fd39132959cdf59baedc5a46a4b7cc2a0 | 175 | py | Python | Slicing_Strings.py | belmiro-kunga/Curso-de-python | ce1c59c19aefbe789435c855b3fa950abb14bcae | ["MIT"] | null | null | null | Slicing_Strings.py | belmiro-kunga/Curso-de-python | ce1c59c19aefbe789435c855b3fa950abb14bcae | ["MIT"] | null | null | null | Slicing_Strings.py | belmiro-kunga/Curso-de-python | ce1c59c19aefbe789435c855b3fa950abb14bcae | ["MIT"] | null | null | null |
# Slicing Strings
"""
b = "hello, world!"
print(b[2:5])
"""
"""
b = "hello, world!"
print(b[2:])
"""
b = "hello, world!"
print(b[-5:-2])
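Two more slice forms that round out the examples above, with their outputs noted:

b = "hello, world!"
print(b[::2])   # "hlo ol!"  -- every second character
print(b[::-1])  # "!dlrow ,olleh"  -- the string reversed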
2f113d45ecd115c700647032729cc14f9f5eb477 | 6,841 | py | Python | matdgl/models/finetune.py | huzongxiang/CrysNetwork | b6772474a65ba5ae1a7942b0d2abca50168b5ffa | ["BSD-2-Clause"] | 4 | 2022-01-10T09:15:41.000Z | 2022-01-19T04:01:29.000Z | matdgl/models/finetune.py | huzongxiang/CrysNetwork | b6772474a65ba5ae1a7942b0d2abca50168b5ffa | ["BSD-2-Clause"] | null | null | null | matdgl/models/finetune.py | huzongxiang/CrysNetwork | b6772474a65ba5ae1a7942b0d2abca50168b5ffa | ["BSD-2-Clause"] | 1 | 2022-01-10T09:13:13.000Z | 2022-01-10T09:13:13.000Z |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 20 10:17:16 2022
@author: huzongxiang
"""
from pathlib import Path
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2
from tensorflow.keras import layers
from matdgl.layers import Set2Set
from matdgl.models.pretrainer import TransformerModel
ModulePath = Path(__file__).parent.absolute()
def FinetuneTransformerRes(state_dim=16,
sp_dim=230,
output_dim=32,
readout_units=128,
dropout=0.0,
reg2=0.0,
reg3=0.0,
reg_rec=0.0,
regression=False,
ntarget=1,
multiclassification=None,
weight_path=Path(ModulePath/"model/transformer.hdf5"),
):
transformer = TransformerModel(atom_dim=16,
bond_dim=64,
num_atom=119,
state_dim=16,
sp_dim=230,
units=32,
edge_steps=1,
transform_steps=1,
num_attention_heads=8,
dense_units=64,
reg0=0.00,
reg1=0.00,
batch_size=32,
spherical_harmonics=True)
transformer.load_weights(weight_path)
for layer in transformer.layers:
layer.trainable = False
x_nodes, edges_matrixs = transformer.layers[-5].output
state_attrs = transformer.layers[-2].output
state_attrs_ = layers.Embedding(sp_dim, state_dim, dtype="float32", name="state_attrs")(state_attrs)
x_state = layers.Dense(16, kernel_regularizer=l2(reg2))(state_attrs_)
x_node = Set2Set(output_dim, kernel_regularizer=l2(reg2), recurrent_regularizer=l2(reg_rec))(x_nodes)
x_edge = Set2Set(output_dim, kernel_regularizer=l2(reg2), recurrent_regularizer=l2(reg_rec))(edges_matrixs, edge_mode=True)
x = layers.Concatenate(axis=-1, name='concat')([x_node, x_edge, x_state])
# x = Set2Set(output_dim, kernel_regularizer=l2(reg2), recurrent_regularizer=l2(reg_rec))(x)
x = layers.Dense(readout_units, activation="relu", kernel_regularizer=l2(reg3), name='readout0')(x)
    x_origin = x
x = layers.Dense(readout_units, activation="relu", kernel_regularizer=l2(reg3), name='res0')(x)
x = layers.Dense(readout_units//2, activation="relu", kernel_regularizer=l2(reg3), name='res1')(x)
x = layers.Dense(readout_units//4, activation="relu", kernel_regularizer=l2(reg3), name='res2')(x)
x = layers.Dense(readout_units//2, activation="relu", kernel_regularizer=l2(reg3), name='res3')(x)
x = layers.Dense(readout_units, activation="relu", kernel_regularizer=l2(reg3), name='res4')(x)
    x = layers.Add()([x, x_origin])
if dropout:
x = layers.Dropout(dropout, name='dropout0')(x)
x = layers.Dense(readout_units//2, activation="relu", kernel_regularizer=l2(reg3), name='readout1')(x)
if dropout:
x = layers.Dropout(dropout, name='dropout1')(x)
x = layers.Dense(readout_units//4, activation="relu", kernel_regularizer=l2(reg3), name='readout2')(x)
if dropout:
x = layers.Dropout(dropout, name='dropout')(x)
if regression:
x = layers.Dense(ntarget, name='final')(x)
elif multiclassification is not None:
x = layers.Dense(multiclassification, activation="softmax", name='final_softmax')(x)
else:
x = layers.Dense(1, activation="sigmoid", name='final')(x)
model = Model(
inputs=transformer.input[:-2],
outputs=[x],
)
return model
def FinetuneTransformer(state_dim=16,
sp_dim=230,
output_dim=32,
readout_units=128,
dropout=0.0,
reg2=0.0,
reg3=0.0,
reg_rec=0.0,
regression=False,
ntarget=1,
multiclassification=None,
weight_path=Path(ModulePath/"model/transformer.hdf5"),
):
transformer = TransformerModel(atom_dim=16,
bond_dim=64,
num_atom=119,
state_dim=16,
sp_dim=230,
units=32,
edge_steps=1,
transform_steps=1,
num_attention_heads=8,
dense_units=64,
reg0=0.00,
reg1=0.00,
batch_size=32,
spherical_harmonics=True)
transformer.load_weights(weight_path)
for layer in transformer.layers:
layer.trainable = False
x_nodes, edges_matrixs = transformer.layers[-5].output
state_attrs = transformer.layers[-2].output
state_attrs_ = layers.Embedding(sp_dim, state_dim, dtype="float32", name="state_attrs")(state_attrs)
x_state = layers.Dense(16, kernel_regularizer=l2(reg2))(state_attrs_)
x_node = Set2Set(output_dim, kernel_regularizer=l2(reg2), recurrent_regularizer=l2(reg_rec))(x_nodes)
x_edge = Set2Set(output_dim, kernel_regularizer=l2(reg2), recurrent_regularizer=l2(reg_rec))(edges_matrixs, edge_mode=True)
x = layers.Concatenate(axis=-1, name='concat')([x_node, x_edge, x_state])
x = layers.Dense(readout_units, activation="relu", kernel_regularizer=l2(reg3), name='readout0')(x)
if dropout:
x = layers.Dropout(dropout, name='dropout0')(x)
x = layers.Dense(readout_units//2, activation="relu", kernel_regularizer=l2(reg3), name='readout1')(x)
if dropout:
x = layers.Dropout(dropout, name='dropout1')(x)
x = layers.Dense(readout_units//4, activation="relu", kernel_regularizer=l2(reg3), name='readout2')(x)
if dropout:
x = layers.Dropout(dropout, name='dropout')(x)
if regression:
x = layers.Dense(ntarget, name='final')(x)
elif multiclassification is not None:
x = layers.Dense(multiclassification, activation="softmax", name='final_softmax')(x)
else:
x = layers.Dense(1, activation="sigmoid", name='final')(x)
model = Model(
inputs=transformer.input[:-2],
outputs=[x],
)
return model
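A hypothetical way to build and compile the residual fine-tuning model above. It assumes the pretrained weight file exists at the default model/transformer.hdf5 path, and the optimizer and loss choices are illustrative only:

import tensorflow as tf

model = FinetuneTransformerRes(
    readout_units=128,
    dropout=0.1,
    regression=True,  # single regression target
    ntarget=1,
)
model.compile(optimizer=tf.keras.optimizers.Adam(1e-4), loss="mse")
model.summary()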
2f3a44879e009771c7751e8614a645cd9d3890dd | 147 | py | Python | server/db/__init__.py | tetelevm/OrdeRPG | 5bea9fbaf3fdd84ab14f7e3033e18eead2cf30ab | ["MIT"] | null | null | null | server/db/__init__.py | tetelevm/OrdeRPG | 5bea9fbaf3fdd84ab14f7e3033e18eead2cf30ab | ["MIT"] | null | null | null | server/db/__init__.py | tetelevm/OrdeRPG | 5bea9fbaf3fdd84ab14f7e3033e18eead2cf30ab | ["MIT"] | null | null | null |
"""
Everything related to the project database
"""
from .models import __all__ as __models_all__
from .models import *
__all__ = __models_all__
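A minimal sketch of how this re-export works: if the (hypothetical) models/__init__.py defines its own __all__, the package above forwards exactly those names:

# models/__init__.py (hypothetical contents)
__all__ = ["Player", "Item"]

class Player: ...
class Item: ...

# Then `from server.db import *` exposes Player and Item, because
# server/db/__init__.py adopts __models_all__ as its own __all__.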
2f4034630ca941fbeb16410ae906ffcd5c862bd2 | 52,309 | py | Python | azure-mgmt-loganalytics/azure/mgmt/loganalytics/operations/workspaces_operations.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | ["MIT"] | 4 | 2016-06-17T23:25:29.000Z | 2022-03-30T22:37:45.000Z | azure-mgmt-loganalytics/azure/mgmt/loganalytics/operations/workspaces_operations.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | ["MIT"] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-loganalytics/azure/mgmt/loganalytics/operations/workspaces_operations.py | v-Ajnava/azure-sdk-for-python | a1f6f80eb5869c5b710e8bfb66146546697e2a6f | ["MIT"] | 3 | 2016-05-03T20:49:46.000Z | 2017-10-05T21:05:27.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class WorkspacesOperations(object):
"""WorkspacesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def disable_intelligence_pack(
self, resource_group_name, workspace_name, intelligence_pack_name, custom_headers=None, raw=False, **operation_config):
"""Disables an intelligence pack for a given workspace.
:param resource_group_name: The name of the resource group to get. The
name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of the Log Analytics Workspace.
:type workspace_name: str
:param intelligence_pack_name: The name of the intelligence pack to be
disabled.
:type intelligence_pack_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: None or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-11-01-preview"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/intelligencePacks/{intelligencePackName}/Disable'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'intelligencePackName': self._serialize.url("intelligence_pack_name", intelligence_pack_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def enable_intelligence_pack(
self, resource_group_name, workspace_name, intelligence_pack_name, custom_headers=None, raw=False, **operation_config):
"""Enables an intelligence pack for a given workspace.
:param resource_group_name: The name of the resource group to get. The
name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of the Log Analytics Workspace.
:type workspace_name: str
:param intelligence_pack_name: The name of the intelligence pack to be
enabled.
:type intelligence_pack_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: None or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-11-01-preview"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/intelligencePacks/{intelligencePackName}/Enable'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'intelligencePackName': self._serialize.url("intelligence_pack_name", intelligence_pack_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def list_intelligence_packs(
self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
"""Lists all the intelligence packs possible and whether they are enabled
or disabled for a given workspace.
:param resource_group_name: The name of the resource group to get. The
name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of the Log Analytics Workspace.
:type workspace_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list of :class:`IntelligencePack
<azure.mgmt.loganalytics.models.IntelligencePack>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: list of :class:`IntelligencePack
<azure.mgmt.loganalytics.models.IntelligencePack>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-11-01-preview"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/intelligencePacks'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[IntelligencePack]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_shared_keys(
self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
"""Gets the shared keys for a workspace.
:param resource_group_name: The name of the resource group to get. The
name is case insensitive.
:type resource_group_name: str
:param workspace_name: Name of the Log Analytics Workspace.
:type workspace_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`SharedKeys
<azure.mgmt.loganalytics.models.SharedKeys>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`SharedKeys
<azure.mgmt.loganalytics.models.SharedKeys>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-11-01-preview"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/sharedKeys'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SharedKeys', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_usages(
self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
"""Gets a list of usage metrics for a workspace.
:param resource_group_name: The name of the resource group to get. The
name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of :class:`UsageMetric
<azure.mgmt.loganalytics.models.UsageMetric>`
:rtype: :class:`UsageMetricPaged
<azure.mgmt.loganalytics.models.UsageMetricPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-11-01-preview"
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/usages'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.UsageMetricPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.UsageMetricPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
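# Usage sketch (hypothetical client and resource names, for illustration
# only): the UsageMetricPaged object returned above is lazily iterable and
# fetches further pages through internal_paging via next_link, e.g.
#
#     for metric in client.workspaces.list_usages('my-rg', 'my-workspace'):
#         print(metric.name.value, metric.current_value)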
def list_management_groups(
self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
"""Gets a list of management groups connected to a workspace.
:param resource_group_name: The name of the resource group to get. The
name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of :class:`ManagementGroup
<azure.mgmt.loganalytics.models.ManagementGroup>`
:rtype: :class:`ManagementGroupPaged
<azure.mgmt.loganalytics.models.ManagementGroupPaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-11-01-preview"
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/managementGroups'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ManagementGroupPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ManagementGroupPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets workspaces in a resource group.
:param resource_group_name: The name of the resource group to get. The
name is case insensitive.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of :class:`Workspace
<azure.mgmt.loganalytics.models.Workspace>`
:rtype: :class:`WorkspacePaged
<azure.mgmt.loganalytics.models.WorkspacePaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-11-01-preview"
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.WorkspacePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.WorkspacePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Gets the workspaces in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of :class:`Workspace
<azure.mgmt.loganalytics.models.Workspace>`
:rtype: :class:`WorkspacePaged
<azure.mgmt.loganalytics.models.WorkspacePaged>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-11-01-preview"
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.OperationalInsights/workspaces'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.WorkspacePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.WorkspacePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, workspace_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Create or update a workspace.
:param resource_group_name: The resource group name of the workspace.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param parameters: The parameters required to create or update a
workspace.
:type parameters: :class:`Workspace
<azure.mgmt.loganalytics.models.Workspace>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`Workspace
<azure.mgmt.loganalytics.models.Workspace>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-11-01-preview"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'Workspace')
# Construct and send request
def long_running_send():
request = self._client.put(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Workspace', response)
if response.status_code == 201:
deserialized = self._deserialize('Workspace', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
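# Usage sketch (hypothetical names, for illustration only): create_or_update
# is a long-running operation, so the returned AzureOperationPoller must be
# resolved to obtain the workspace, e.g.
#
#     poller = client.workspaces.create_or_update(
#         'my-rg', 'my-workspace', models.Workspace(location='eastus'))
#     workspace = poller.result()  # blocks until provisioning finishes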
def delete(
self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a workspace instance.
:param resource_group_name: The resource group name of the workspace.
:type resource_group_name: str
:param workspace_name: Name of the Log Analytics Workspace.
:type workspace_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: None or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-11-01-preview"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get(
self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
"""Gets a workspace instance.
:param resource_group_name: The resource group name of the workspace.
:type resource_group_name: str
:param workspace_name: Name of the Log Analytics Workspace.
:type workspace_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`Workspace <azure.mgmt.loganalytics.models.Workspace>`
or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`Workspace <azure.mgmt.loganalytics.models.Workspace>`
or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-11-01-preview"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Workspace', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_link_targets(
self, custom_headers=None, raw=False, **operation_config):
"""Get a list of workspaces which the current user has administrator
privileges and are not associated with an Azure Subscription. The
subscriptionId parameter in the Url is ignored.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: list of :class:`LinkTarget
<azure.mgmt.loganalytics.models.LinkTarget>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: list of :class:`LinkTarget
<azure.mgmt.loganalytics.models.LinkTarget>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-03-20"
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.OperationalInsights/linkTargets'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('[LinkTarget]', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_schema(
self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
"""Gets the schema for a given workspace.
:param resource_group_name: The name of the resource group to get. The
name is case insensitive.
:type resource_group_name: str
:param workspace_name: Log Analytics workspace name
:type workspace_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`SearchGetSchemaResponse
<azure.mgmt.loganalytics.models.SearchGetSchemaResponse>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`SearchGetSchemaResponse
<azure.mgmt.loganalytics.models.SearchGetSchemaResponse>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-03-20"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/schema'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SearchGetSchemaResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_search_results(
self, resource_group_name, workspace_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Submit a search for a given workspace. The response will contain an id
to track the search. User can use the id to poll the search status and
get the full search result later if the search takes long time to
finish. .
:param resource_group_name: The name of the resource group to get. The
name is case insensitive.
:type resource_group_name: str
:param workspace_name: Log Analytics workspace name
:type workspace_name: str
:param parameters: The parameters required to execute a search query.
:type parameters: :class:`SearchParameters
<azure.mgmt.loganalytics.models.SearchParameters>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:return:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`SearchResultsResponse
<azure.mgmt.loganalytics.models.SearchResultsResponse>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
or :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-03-20"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/search'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'SearchParameters')
# Construct and send request
def long_running_send():
request = self._client.post(url, query_parameters)
return self._client.send(
request, header_parameters, body_content, **operation_config)
def get_long_running_status(status_link, headers=None):
request = self._client.get(status_link)
if headers:
request.headers.update(headers)
return self._client.send(
request, header_parameters, **operation_config)
def get_long_running_output(response):
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SearchResultsResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
def update_search_results(
self, resource_group_name, workspace_name, id, custom_headers=None, raw=False, **operation_config):
"""Gets updated search results for a given search query.
:param resource_group_name: The name of the resource group to get. The
name is case insensitive.
:type resource_group_name: str
:param workspace_name: Log Analytics workspace name
:type workspace_name: str
:param id: The id of the search that will have results updated. You
can get the id from the response of the GetResults call.
:type id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: :class:`SearchResultsResponse
<azure.mgmt.loganalytics.models.SearchResultsResponse>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if
raw=true
:rtype: :class:`SearchResultsResponse
<azure.mgmt.loganalytics.models.SearchResultsResponse>` or
:class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-03-20"
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/search/{id}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'id': self._serialize.url("id", id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SearchResultsResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
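# A minimal end-to-end sketch (assumption: placeholder credential values and
# subscription id; WorkspacesOperations is normally reached through the
# generated LogAnalyticsManagementClient rather than instantiated directly).
if __name__ == '__main__':
    from azure.common.credentials import ServicePrincipalCredentials
    from azure.mgmt.loganalytics import LogAnalyticsManagementClient

    credentials = ServicePrincipalCredentials(
        client_id='<client-id>', secret='<client-secret>', tenant='<tenant-id>')
    client = LogAnalyticsManagementClient(credentials, '<subscription-id>')
    # The class above is exposed as `client.workspaces`.
    for workspace in client.workspaces.list():
        print(workspace.name, workspace.location)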
| 47.040468
| 198
| 0.663347
| 5,526
| 52,309
| 6.07528
| 0.047412
| 0.027702
| 0.034433
| 0.03217
| 0.93569
| 0.934022
| 0.934022
| 0.929674
| 0.922406
| 0.919576
| 0
| 0.006815
| 0.239825
| 52,309
| 1,111
| 199
| 47.082808
| 0.837466
| 0.284597
| 0
| 0.868705
| 0
| 0.007194
| 0.187425
| 0.096993
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046763
| false
| 0
| 0.008993
| 0
| 0.127698
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 2f6885a12eae50f2bc2ba3337832aace4d25f5ce
| 20,858
| py
| Python
| core/tests/test_views.py
| softwaydev/ca
| 7bf730b9aec9e1c27dc0dff2de286ff83a3cc954
| ["MIT"] | 8
| 2017-06-16T10:45:27.000Z
| 2020-01-01T14:51:27.000Z
| core/tests/test_views.py
| softwaydev/ca
| 7bf730b9aec9e1c27dc0dff2de286ff83a3cc954
| ["MIT"] | 66
| 2017-05-12T14:33:00.000Z
| 2020-05-13T13:04:13.000Z
| core/tests/test_views.py
| softwaydev/ca
| 7bf730b9aec9e1c27dc0dff2de286ff83a3cc954
| ["MIT"] | 4
| 2017-05-16T17:48:17.000Z
| 2021-02-12T09:44:22.000Z
|
from OpenSSL import crypto
from django.test import TestCase
from django.contrib.auth.models import User
from django.urls import reverse
from django.core.files.uploadedfile import SimpleUploadedFile
from core.tests import factories
from core import models
class RootCrtExists(TestCase):
def setUp(self):
self.user = User.objects.create(
username='Serega',
password='passwd',
)
factories.RootCrt.create()
def test_auth(self):
response = self.client.get(reverse('root_crt_exists'))
redirect_url = reverse('login') + '?next=' + reverse('root_crt_exists')
self.assertRedirects(response, redirect_url)
def test_smoke(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('root_crt_exists'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'core/root_certificate_managing/already_exists.html')
class ChoiceRootCrtView(TestCase):
def setUp(self):
self.user = User.objects.create(
username='Serega',
password='passwd',
)
def test_auth(self):
response = self.client.get(reverse('root_crt'))
redirect_url = reverse('login') + '?next=' + reverse('root_crt')
self.assertRedirects(response, redirect_url)
def test_smoke(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('root_crt'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'core/root_certificate_managing/crt_choice.html')
def test_root_crt_exists(self):
factories.RootCrt.create()
self.client.force_login(user=self.user)
response = self.client.get(reverse('root_crt'))
self.assertRedirects(response, reverse('root_crt_exists'))
class RootCrtUploadExistingView(TestCase):
def setUp(self):
self.user = User.objects.create(
username='Serega',
password='passwd',
)
def test_auth(self):
response = self.client.get(reverse('root_crt_upload_existing'))
redirect_url = reverse('login') + '?next=' + reverse('root_crt_upload_existing')
self.assertRedirects(response, redirect_url)
def test_smoke(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('root_crt_upload_existing'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'core/root_certificate_managing/upload_existing.html')
def test_context(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('root_crt_upload_existing'))
self.assertEqual(response.context['breadcrumbs'][0], ('Home', reverse('root_crt')))
self.assertEqual(response.context['breadcrumbs'][1], ('Load root certificate', ''))
# as a first approximation
def test_success_url(self):
self.client.force_login(user=self.user)
crt = SimpleUploadedFile('rootCA.crt', factories.root_crt_all_fields)
key = SimpleUploadedFile('rootCA.key', factories.root_key_all_fields)
response = self.client.post(reverse('root_crt_upload_existing'), {'crt': crt, 'key': key})
self.assertRedirects(response, reverse('root_crt_view'))
def test_root_crt_exists(self):
factories.RootCrt.create()
self.client.force_login(user=self.user)
response = self.client.get(reverse('root_crt_upload_existing'))
self.assertRedirects(response, reverse('root_crt_exists'))
class RootCrtView(TestCase):
def setUp(self):
self.user = User.objects.create(
username='Serega',
password='passwd',
)
factories.RootCrt.create()
def test_auth(self):
response = self.client.get(reverse('root_crt_upload_existing'))
redirect_url = reverse('login') + '?next=' + reverse('root_crt_upload_existing')
self.assertRedirects(response, redirect_url)
def test_smoke(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('root_crt_view'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'core/root_certificate_managing/view.html')
def test_context(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('root_crt_view'))
cert = crypto.load_certificate(crypto.FILETYPE_PEM, factories.root_crt_all_fields).get_subject()
self.assertEqual(response.context['breadcrumbs'][0], ('Home', reverse('index')))
self.assertEqual(response.context['breadcrumbs'][1], ('View root certificate', ''))
self.assertEqual(response.context['cert'], cert)
self.assertEqual(str(response.context['crt_validity_period']), '2018-05-29 10:26:55')
def test_initial_form(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('root_crt_view'))
self.assertIn(factories.root_crt_all_fields.decode(), str(response.context['form']))
self.assertIn(factories.root_key_all_fields.decode(), str(response.context['form']))
def test_root_crt_not_exists(self):
models.RootCrt.objects.all().delete()
self.client.force_login(user=self.user)
response = self.client.get(reverse('root_crt_view'))
self.assertEqual(response.status_code, 404)
class RootCrtDeleteView(TestCase):
def setUp(self):
self.user = User.objects.create(
username='Serega',
password='passwd',
)
factories.RootCrt.create()
def test_auth(self):
response = self.client.get(reverse('root_crt_delete'))
redirect_url = reverse('login') + '?next=' + reverse('root_crt_delete')
self.assertRedirects(response, redirect_url)
def test_smoke(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('root_crt_delete'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'core/root_certificate_managing/delete.html')
def test_root_crt_not_exists(self):
models.RootCrt.objects.all().delete()
self.client.force_login(user=self.user)
response = self.client.get(reverse('root_crt_delete'))
self.assertEqual(response.status_code, 404)
# as a first approximation
def test_delete(self):
self.client.force_login(user=self.user)
response = self.client.post(reverse('root_crt_delete'))
self.assertEqual(models.RootCrt.objects.all().count(), 0)
self.assertRedirects(response, reverse('root_crt'))
def test_context(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('root_crt_delete'))
self.assertEqual(response.context['breadcrumbs'][0], ('Home', reverse('index')))
self.assertEqual(response.context['breadcrumbs'][1], ('View certificate', reverse('root_crt_view')))
self.assertEqual(response.context['breadcrumbs'][2], ('Delete root certificate', ''))
class RootCrtGenerateNewView(TestCase):
def setUp(self):
self.user = User.objects.create(
username='Serega',
password='passwd',
)
def test_auth(self):
response = self.client.get(reverse('root_crt_generate_new'))
redirect_url = reverse('login') + '?next=' + reverse('root_crt_generate_new')
self.assertRedirects(response, redirect_url)
def test_smoke(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('root_crt_generate_new'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'core/root_certificate_managing/generate_new.html')
def test_root_crt_exists(self):
factories.RootCrt.create()
self.client.force_login(user=self.user)
response = self.client.get(reverse('root_crt_generate_new'))
self.assertRedirects(response, reverse('root_crt_exists'))
def test_context(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('root_crt_generate_new'))
self.assertEqual(response.context['breadcrumbs'][0], ('Home', reverse('root_crt')))
self.assertEqual(response.context['breadcrumbs'][1], ('Generate root certificate', ''))
# as a first approximation
def test_success_url(self):
self.client.force_login(user=self.user)
response = self.client.post(reverse('root_crt_generate_new'), {'country': 'ru', 'state': 'moscow', 'location': 'moscow',
'organization': 'soft-way', 'organizational_unit_name':
'soft-way', 'common_name': '127.0.0.1', 'email':
'test44@gmail.com', 'validity_period': '2032-05-29'})
self.assertEqual(models.RootCrt.objects.all().count(), 1)
self.assertRedirects(response, reverse('root_crt_view'))
class CertificatesSearch(TestCase):
def setUp(self):
self.user = User.objects.create(
username='Serega',
password='passwd',
)
factories.RootCrt.create()
def test_auth(self):
response = self.client.get(reverse('certificates_search'))
redirect_url = reverse('login') + '?next=' + reverse('certificates_search')
self.assertRedirects(response, redirect_url)
def test_smoke(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_search'), {'sort': 'cn'})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'core/certificate/search.html')
def test_root_crt_not_exists(self):
models.RootCrt.objects.all().delete()
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_search'))
self.assertRedirects(response, reverse('root_crt'))
def test_search(self):
factories.SiteCrt.create()
factories.SiteCrt.create(cn='127.0.0.2')
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_search'), {'cn': '127.0.0.1', 'sort': 'cn'})
self.assertEqual(len(response.context['object_list']), 1)
class CertificatesCreateView(TestCase):
def setUp(self):
self.user = User.objects.create(
username='Serega',
password='passwd',
)
factories.RootCrt.create()
def test_auth(self):
response = self.client.get(reverse('certificates_create'))
redirect_url = reverse('login') + '?next=' + reverse('certificates_create')
self.assertRedirects(response, redirect_url)
def test_smoke(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_create'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'core/certificate/create.html')
def test_root_crt_not_exists(self):
models.RootCrt.objects.all().delete()
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_create'))
self.assertRedirects(response, reverse('root_crt'))
def test_success_url(self):
self.client.force_login(user=self.user)
response = self.client.post(reverse('certificates_create'), {'cn': '127.0.0.1', 'validity_period': '2019-05-29'})
self.assertEqual(models.SiteCrt.objects.get().cn, '127.0.0.1')
def test_context(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_create'))
self.assertEqual(response.context['breadcrumbs'][0], ('Home', reverse('index')))
self.assertEqual(response.context['breadcrumbs'][1], ('Create new certificate', ''))
class CertificatesUploadExistingView(TestCase):
def setUp(self):
self.user = User.objects.create(
username='Serega',
password='passwd',
)
factories.RootCrt.create()
def test_auth(self):
response = self.client.get(reverse('certificates_upload_existing'))
redirect_url = reverse('login') + '?next=' + reverse('certificates_upload_existing')
self.assertRedirects(response, redirect_url)
def test_smoke(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_upload_existing'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'core/certificate/upload_existing.html')
def test_root_crt_not_exists(self):
models.RootCrt.objects.all().delete()
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_upload_existing'))
self.assertRedirects(response, reverse('root_crt'))
def test_context(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_upload_existing'))
self.assertEqual(response.context['breadcrumbs'][0], ('Home', reverse('index')))
self.assertEqual(response.context['breadcrumbs'][1], ('Load an existing certificate', ''))
# as a first approximation
def test_form_valid_files(self):
self.client.force_login(user=self.user)
response = self.client.post(reverse('certificates_upload_existing'),
{'crt_file': SimpleUploadedFile('test.crt', factories.site_crt_all_fields),
'key_file': SimpleUploadedFile('test.key', factories.site_key_all_fields)})
self.assertEqual(models.SiteCrt.objects.all().count(), 1)
def test_form_valid_text(self):
self.client.force_login(user=self.user)
response = self.client.post(reverse('certificates_upload_existing'), {'crt_text': factories.site_crt_all_fields.decode(),
'key_text': factories.site_key_all_fields.decode()})
self.assertEqual(models.SiteCrt.objects.all().count(), 1)
class CertificatesView(TestCase):
def setUp(self):
self.user = User.objects.create(
username='Serega',
password='passwd',
)
factories.RootCrt.create()
factories.SiteCrt.create()
def test_auth(self):
response = self.client.get(reverse('certificates_view', kwargs={'pk': '1'}))
redirect_url = reverse('login') + '?next=' + reverse('certificates_view', kwargs={'pk': '1'})
self.assertRedirects(response, redirect_url)
def test_smoke(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_view', kwargs={'pk': '1'}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'core/certificate/view.html')
def test_context(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_view', kwargs={'pk': '1'}))
cert = crypto.load_certificate(crypto.FILETYPE_PEM, factories.site_crt_all_fields).get_subject()
self.assertEqual(response.context['breadcrumbs'][0], ('Home', reverse('index')))
self.assertEqual(response.context['breadcrumbs'][1], ('View %s' % cert.CN, ''))
self.assertEqual(response.context['cert'], cert)
self.assertEqual(str(response.context['crt_validity_period']), '2019-05-29 13:08:33')
def test_initial_form(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_view', kwargs={'pk': '1'}))
self.assertIn(factories.site_crt_all_fields.decode(), str(response.context['form']))
self.assertIn(factories.site_key_all_fields.decode(), str(response.context['form']))
def test_root_crt_not_exists(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_view', kwargs={'pk': '2'}))
self.assertEqual(response.status_code, 404)
class CertificatesDeleteView(TestCase):
def setUp(self):
self.user = User.objects.create(
username='Serega',
password='passwd',
)
factories.RootCrt.create()
factories.SiteCrt.create()
def test_auth(self):
response = self.client.get(reverse('certificates_delete', kwargs={'pk': '1'}))
redirect_url = reverse('login') + '?next=' + reverse('certificates_delete', kwargs={'pk': '1'})
self.assertRedirects(response, redirect_url)
def test_smoke(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_delete', kwargs={'pk': '1'}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'core/certificate/delete.html')
def test_site_crt_not_exists(self):
models.SiteCrt.objects.all().delete()
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_delete', kwargs={'pk': '1'}))
self.assertEqual(response.status_code, 404)
def test_root_crt_not_exists(self):
models.RootCrt.objects.all().delete()
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_delete', kwargs={'pk': '1'}))
self.assertRedirects(response, reverse('root_crt'))
# as a first approximation
def test_delete(self):
self.client.force_login(user=self.user)
response = self.client.post(reverse('certificates_delete', kwargs={'pk': '1'}))
self.assertEqual(models.SiteCrt.objects.all().count(), 0)
def test_context(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_delete', kwargs={'pk': '1'}))
self.assertEqual(response.context['breadcrumbs'][0], ('Home', reverse('index')))
self.assertEqual(response.context['breadcrumbs'][1], ('View %s' % models.SiteCrt.objects.get().cn,
reverse('certificates_view', kwargs={'pk': 1})))
self.assertEqual(response.context['breadcrumbs'][2], ('Delete %s' % models.SiteCrt.objects.get().cn, ''))
class CertificatesRecreateView(TestCase):
def setUp(self):
self.user = User.objects.create(
username='Serega',
password='passwd',
)
factories.RootCrt.create()
factories.SiteCrt.create()
def test_auth(self):
response = self.client.get(reverse('certificates_recreate', kwargs={'pk': '1'}))
redirect_url = reverse('login') + '?next=' + reverse('certificates_recreate', kwargs={'pk': '1'})
self.assertRedirects(response, redirect_url)
def test_smoke(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_recreate', kwargs={'pk': '1'}))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'core/certificate/recreate.html')
def test_root_crt_not_exists(self):
models.RootCrt.objects.all().delete()
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_recreate', kwargs={'pk': '1'}))
self.assertRedirects(response, reverse('root_crt'))
def test_site_crt_not_exists(self):
models.SiteCrt.objects.all().delete()
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_recreate', kwargs={'pk': '1'}))
self.assertEqual(response.status_code, 404)
def test_context(self):
self.client.force_login(user=self.user)
response = self.client.get(reverse('certificates_recreate', kwargs={'pk': '1'}))
self.assertEqual(response.context['breadcrumbs'][0], ('Home', reverse('index')))
self.assertEqual(response.context['breadcrumbs'][1], ('View %s' % models.SiteCrt.objects.get().cn,
reverse('certificates_view', kwargs={'pk': '1'})))
self.assertEqual(response.context['breadcrumbs'][2], ('Recreate certificate', ''))
# as a first approximation
def test_recreation(self):
self.client.force_login(user=self.user)
response = self.client.post(reverse('certificates_recreate', kwargs={'pk': '1'}), {'validity_period': '2020-05-29'})
self.assertRedirects(response, reverse('certificates_view', kwargs={'pk': '1'}))
self.assertEqual(str(models.SiteCrt.objects.get().date_end)[:10], '2020-05-29')

--- salt_2s.py | drhay53/SALT2X @ 5d51ded9d6feabe9846edc2f75f7956efbc38fa4 | Python | 23,753 bytes | MIT | blob 2f83160631d44f5df2722c683a429e318911783d ---

import numpy as np
import sncosmo
import matplotlib as mpl
from astropy.table import Table, Column
mpl.use('Agg')
import matplotlib.pyplot as plt
import os
from Salt2X import *
import helpers
import fitters
import emcee
import sys
from iminuit import describe
import argparse
import traceback
import astropy
from IPython import embed
from sncosmo import photdata
import time
# change
import sfdmap
parser = argparse.ArgumentParser()
parser.add_argument('--emcee', dest='emcee', action='store_true')
parser.add_argument('--jla', dest='jla', action='store_true')
parser.add_argument('--cadencesim', dest='cadencesim', action='store_true')
parser.add_argument('--noskew', dest='noskew', action='store_true')
parser.add_argument('--nsamp', dest='nsamp', default=5000, type=float)
parser.add_argument('--specific', '-s', dest='specific', default=None, type=str)
args = parser.parse_args()
modeldir = os.environ['SNCOSMO_MODELDIR']
scratch = os.environ['SCRATCH']
scratch = '.'
def emcee_chain_maxlike(chain, key):
maxlike = np.argmax(chain['lnprob'])
return chain[key][maxlike]
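# e.g. emcee_chain_maxlike(chains, 'x1') picks the x1 value of the
# maximum-posterior sample from a chain dict that carries an 'lnprob' key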
if args.jla:
# zeropoints taken from the JLA magnitude system files
standard_zps = {'STANDARD::U':9.724, 'STANDARD::B':9.907, 'STANDARD::V':9.464, 'STANDARD::R':9.166, 'STANDARD::I':8.846}
FourShooter_zps = {'4SHOOTER2::Us':9.724, '4SHOOTER2::B':9.8744, '4SHOOTER2::V':9.4789, '4SHOOTER2::R':9.1554, '4SHOOTER2::I':8.8506}
keplercam_zps = {'KEPLERCAM::Us':9.6922, 'KEPLERCAM::B':9.8803, 'KEPLERCAM::V':9.4722, 'KEPLERCAM::r':9.3524, 'KEPLERCAM::i':9.2542}
swope_zps = {'SWOPE2::u':10.514, 'SWOPE2::g':9.64406, 'SWOPE2::r':9.3516, 'SWOPE2::i':9.2500, 'SWOPE2::B':9.876433, 'swope2::v_lc3009':9.471276, 'swope2::v_lc3014':9.476626, 'swope2::v_lc9844':9.477482}
sdss_zps = {'SDSS::u':0.06791, 'SDSS::g':-0.02028, 'SDSS::r':-0.00493, 'SDSS::i':-0.01780, 'SDSS::z':-0.01015}
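# these per-band offsets are subtracted from the light curves' ZP column
# in the band-correction loop further down, putting all bands on a common
# magnitude system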
# registering the JLA bandpasses with sncosmo
helpers.get_JLA_bandpasses()
helpers.register_JLA_magsys()
if not os.path.exists('./fit_results'):
os.makedirs('./fit_results')
lcfile = './JLA/jla_light_curves/jla_lc.txt'
lc = np.genfromtxt(lcfile, dtype=None)
restcut = (3000,7000)
for i, sn in enumerate(lc):
if args.specific is not None:
if sn != args.specific:
continue
print '*' * 60
print sn, i
# get the light curve and if it exists, the covmat
data = sncosmo.read_lc(sn, format='salt2')
covmat = sn.replace('lc-', 'covmat_lc-')
covmat = covmat.replace('.list', '.dat')
if os.path.isfile(covmat):
covmat = np.loadtxt(covmat, skiprows=1)
c = Column(covmat,'Fluxcov')
data.add_column(c)
else:
covmat = np.diag(data['Fluxerr']**2)
c = Column(covmat,'Fluxcov')
data.add_column(c)
# deal with the different metadata keywords
try:
z = data.meta['Redshift']
except KeyError:
pass
try:
z = data.meta['Z_CMB']
except KeyError:
pass
try:
survey = data.meta['SURVEY']
except KeyError:
pass
try:
survey = data.meta['SET']
except KeyError:
pass
nickname = data.meta['SN']
try:
nickname = str(int(nickname))
except ValueError:
pass
# rename columns
data.rename_column('Filter', 'tmp')
data.rename_column('Date', 'time')
data.rename_column('Flux', 'flux')
data.rename_column('Fluxerr', 'fluxerr')
data.rename_column('Fluxcov', 'cov')
data.rename_column('MagSys', 'zpsys')
data.rename_column('ZP', 'zp')
# SNLS need special bandpasses, and we have to make a new column to deal with dtype issues
if survey == 'SNLS':
sn_nickname = sn.split('/')[-1].split('.')[0].split('-')[-1]
band = []
for j, bp in enumerate(data['tmp']):
band.append( '%s-%s' %(sn_nickname, bp) )
band = astropy.table.Column(band, name='band')
data.add_column(band)
data.remove_column('tmp')
else:
data.rename_column('tmp', 'band')
# deal with swope filters
mask = (data['band'] == 'SWOPE2::V')
nswopev = len(mask.nonzero()[0])
if nswopev > 0:
band = []
for j, bp in enumerate(data['band']):
if (bp == 'SWOPE2::V'):
if (data['time'][j] < 53749):
band.append('swope2::v_lc3014')
elif (data['time'][j] < 53760):
band.append('swope2::v_lc3009')
else:
band.append('swope2::v_lc9844')
else:
band.append(bp)
data.remove_column('band')
band = astropy.table.Column(band, name='band')
data.add_column(band)
ind = np.where( (data['band'] == 'SWOPE2::V') & (data['time']>53749.) & ((data['time']<=53760.)) )
data['band'][ind] = 'swope2::v_lc3009'
ind = np.where( (data['band'] == 'SWOPE2::V') & (data['time']>53760.) )
data['band'][ind] = 'swope2::v_lc9844'
# print ind
# deal with filter coverage
# also deal with STANDARD filter zeropoints
unique_bands = np.unique(data['band'])
fit_bands = []
nofit_bands = []
# print unique_bands
for ub in unique_bands:
print ub
bp = sncosmo.get_bandpass(ub)
rest = bp.wave_eff / (1.0+z)
if (rest >= restcut[0]) & (rest <= restcut[1]):
fit_bands.append(ub)
else:
nofit_bands.append(ub)
if 'STANDARD' in ub:
ind = np.where(data['band'] == ub)
data['zp'][ind] = data['zp'][ind] - float(standard_zps[ub])
if '4SHOOTER2' in ub:
ind = np.where(data['band'] == ub)
data['zp'][ind] = data['zp'][ind] - float(FourShooter_zps[ub])
if 'KEPLERCAM' in ub:
ind = np.where(data['band'] == ub)
data['zp'][ind] = data['zp'][ind] - float(keplercam_zps[ub])
if 'swope' in ub.lower():
ind = np.where(data['band'] == ub)
data['zp'][ind] = data['zp'][ind] - float(swope_zps[ub])
if 'sdss' in ub.lower():
ind = np.where(data['band'] == ub)
data['zp'][ind] = data['zp'][ind] - float(sdss_zps[ub])
for nfb in nofit_bands:
mask = np.array(data['band'] != nfb)
data = sncosmo.select_data(data,mask)
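# worked example of the rest-frame cut above: at z = 0.5 a band with
# wave_eff = 9000 A lands at 9000 / 1.5 = 6000 A rest-frame, inside
# restcut = (3000, 7000), so it is kept for fitting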
# build the normal salt model and the salt2x model
mwebv = data.meta['MWEBV']
dust = sncosmo.CCM89Dust()
if args.emcee:
if not os.path.exists('./plots/emcee/jla'):
os.makedirs('./plots/emcee/jla')
if not os.path.exists('./plots/emcee/jla/triangle'):
os.makedirs('./plots/emcee/jla/triangle')
if not os.path.exists('./plots/emcee/jla/salt'):
os.makedirs('./plots/emcee/jla/salt')
# make the 2stretch source, apply dust, set it to the right z
source = Salt2XSource(version='2.4', modeldir=modeldir)
model = sncosmo.Model(source=source, effects=[dust], effect_names=['mw'], effect_frames=['obs'])
SaltSource = sncosmo.SALT2Source(version='2.4', modeldir=modeldir)
SaltModel = sncosmo.Model(source=SaltSource, effects=[dust], effect_names=['mw'], effect_frames=['obs'])
model.set(z=z, mwebv=float(mwebv), mwr_v=3.1)
SaltModel.set(z=z, mwebv=float(mwebv), mwr_v=3.1)
if args.noskew:
emfit = fitters.emcee_salt_fit_noskew(data,model,SaltModel)
else:
emfit = fitters.emcee_salt_fit(data,model,SaltModel)
try:
try:
cov, res = emfit.normal_salt_fit(nickname)
except Exception:
traceback.print_exc()
print 'normal_salt_fit failed exception'
continue
nsamp = args.nsamp
t0 = time.time()
print 'sampling %s times...' %(args.nsamp*emfit.nwalkers)
fit = emfit.run(nsamples=nsamp)
print 'time %s samples: %s' %(nsamp*emfit.nwalkers, time.time()-t0)
# make the chains into a dictionary keyed by param name
chains = emfit.chain_dict(fit)
# occasionally samples go into x0 < 0. Rather than restricting
# at the likelihood level, we sample longer until we have
# enough valid chains
# in this final implementation of the code I don't think
# the while loop is ever actually entered for JLA
ind = np.where(~np.isnan(chains['mB']))
print 'good samples: %s' %len(chains['mB'][ind])
if len(chains['mB'][ind]) >= args.nsamp*emfit.nwalkers:
keep_going = False
else:
keep_going = True
ctr = 0
while keep_going:
ctr += 1
print 'sampling %i00 times...' %args.nsamp
fit = emfit.keep_going(fit, float(mwebv), nsamples=nsamp)
print 'time %s samples: %s' %(nsamp, time.time()-t0)
chains = emfit.chain_dict(fit)
ind = np.where(~np.isnan(chains['mB']))
print 'good samples: %s' %len(chains['mB'][ind])
if len(chains['mB'][ind]) >= args.nsamp*100:
keep_going = False
if ctr >= 5:
print 'Sampled 5 times. Just stopping'
print 'good samples: %s' %len(chains['mB'][ind])
keep_going = False
print 'finally done sampling!'
# these are just crude errors to be printed in the LC plots
err = {}
for k in chains.keys():
e = np.percentile(chains[k], [16,84])
err[k] = 0.5*(e[1]-e[0])
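# for a roughly Gaussian posterior, half of the 16th-to-84th percentile
# spread is the 1-sigma width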
# using the maximum likelihood sample just for plotting purposes
maxlike = np.argmax(chains['lnprob'])
model.set(t0=chains['t0'][maxlike], x1=chains['x1'][maxlike], s=chains['s'][maxlike], c=chains['c'][maxlike], x0=chains['x0'][maxlike], z=z, mwebv=float(mwebv))
# the errors passed in here are errors in the measured parameters. Best-fit taken from maximum likelihood sample
sncosmo.plot_lc(emfit.data, model=model, errors=err, fname='./plots/emcee/jla/%s.pdf' %(nickname),color='black')
if args.noskew:
triangle_keys = ['mB', 'c', 't0', 'x1']
else:
triangle_keys = ['mB', 'c','s', 't0', 'x1']
helpers.save(chains, './chains/%s.chains' %(nickname))
# triangle plots
emfit.plots(chains, nickname, triangle_keys, outdir='./plots/emcee/jla/triangle')
except Exception as e:
# as of final release of the code no JLA SNe fail the try except:
# occasionally a simulated LC will fail, usually due to poor S/N
traceback.print_exc()
continue
# output lcfit file
outdir = os.path.abspath('./fit_results/emcee/JLA/%s' %(nickname))
if not os.path.exists(outdir):
os.makedirs(outdir)
if args.noskew:
params = [chains['x0'][maxlike],chains['x1'][maxlike],chains['c'][maxlike],chains['t0'][maxlike]]
else:
params = [chains['x0'][maxlike],chains['x1'][maxlike],chains['s'][maxlike],chains['c'][maxlike],chains['t0'][maxlike]]
chisq = emfit.chi2(params)
dof = len(emfit.data['flux'])
print chisq, dof
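# reduced chi-square is chisq / dof; values near 1 mean the model matches
# the data within its quoted errors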
# calculate mB
# dumps statistics from the chains to a file, though the standardization code uses the chain files directly
helpers.dump_emcee_results(chains, outdir, nickname, z, chisq, dof, survey)
if args.cadencesim:
# The zeropoints in the sim files are already corrected for the magnitude systems registered to sncosmo
standard_zps = {'STANDARD::U':0, 'STANDARD::B':0, 'STANDARD::V':0, 'STANDARD::R':0, 'STANDARD::I':0}
FourShooter_zps = {'4SHOOTER2::Us':0, '4SHOOTER2::B':0, '4SHOOTER2::V':0, '4SHOOTER2::R':0, '4SHOOTER2::I':0}
keplercam_zps = {'KEPLERCAM::Us':0, 'KEPLERCAM::B':0, 'KEPLERCAM::V':0, 'KEPLERCAM::r':0, 'KEPLERCAM::i':0}
swope_zps = {'SWOPE2::u':0, 'SWOPE2::g':0, 'SWOPE2::r':0, 'SWOPE2::i':0, 'SWOPE2::B':0, 'swope2::v_lc3009':0, 'swope2::v_lc3014':0, 'swope2::v_lc9844':0}
sdss_zps = {'SDSS::u':0, 'SDSS::g':0, 'SDSS::r':0, 'SDSS::i':0, 'SDSS::z':0}
# registering the JLA bandpasses with sncosmo
helpers.get_JLA_bandpasses()
helpers.register_JLA_magsys()
if not os.path.exists('./fit_results'):
os.makedirs('./fit_results')
lcfile = './cadence_sim/lc/sim_lc.txt'
lc = np.genfromtxt(lcfile, dtype=None)
restcut = (3000,7000)
for i, sn in enumerate(lc):
if args.specific is not None:
if sn != args.specific:
continue
print '*' * 60
print sn, i
# get the light curve
data = sncosmo.read_lc(sn, format='salt2')
covmat = np.diag(data['FluxPsferr']**2)
c = Column(covmat,'Fluxcov')
data.add_column(c)
# deal with the different metadata keywords
try:
z = data.meta['REDSHIFT']
except KeyError:
pass
try:
z = data.meta['Z_CMB']
except KeyError:
pass
try:
survey = data.meta['SURVEY']
except KeyError:
pass
try:
survey = data.meta['SET']
except KeyError:
pass
nickname = data.meta['SN']
band_nickname = data.meta['SN']
nickname = sn.split('.')[0].split('/')[-1]
try:
nickname = str(int(nickname))
except ValueError:
pass
# rename some columns
data.rename_column('Filter', 'tmp')
data.rename_column('Date', 'time')
data.rename_column('FluxPsf', 'flux')
data.rename_column('FluxPsferr', 'fluxerr')
data.rename_column('Fluxcov', 'cov')
data.rename_column('MagSys', 'zpsys')
data.rename_column('ZP', 'zp')
# SNLS need special bandpasses, and we have to make a new column to deal with dtype issues
if survey == 'SNLS':
sn_nickname = sn.split('/')[-1].split('.')[0].split('-')[-1]
band = []
for j, bp in enumerate(data['tmp']):
band.append( '%s-%s' %(band_nickname, bp) )
band = astropy.table.Column(band, name='band')
data.add_column(band)
data.remove_column('tmp')
else:
data.rename_column('tmp', 'band')
# deal with swope filters
mask = (data['band'] == 'SWOPE2::V')
nswopev = len(mask.nonzero()[0])
if nswopev > 0:
band = []
for j, bp in enumerate(data['band']):
if (bp == 'SWOPE2::V'):
if (data['time'][j] < 53749):
band.append('swope2::v_lc3014')
elif (data['time'][j] < 53760):
band.append('swope2::v_lc3009')
else:
band.append('swope2::v_lc9844')
else:
band.append(bp)
data.remove_column('band')
band = astropy.table.Column(band, name='band')
data.add_column(band)
ind = np.where( (data['band'] == 'SWOPE2::V') & (data['time']>53749.) & ((data['time']<=53760.)) )
data['band'][ind] = 'swope2::v_lc3009'
ind = np.where( (data['band'] == 'SWOPE2::V') & (data['time']>53760.) )
data['band'][ind] = 'swope2::v_lc9844'
# print ind
# deal with filter coverage
# also deal with STANDARD filter zeropoints
unique_bands = np.unique(data['band'])
fit_bands = []
nofit_bands = []
for ub in unique_bands:
bp = sncosmo.get_bandpass(ub)
rest = bp.wave_eff / (1.0+z)
if (rest >= restcut[0]) & (rest <= restcut[1]):
fit_bands.append(ub)
else:
nofit_bands.append(ub)
if 'STANDARD' in ub:
ind = np.where(data['band'] == ub)
data['zp'][ind] = data['zp'][ind] - float(standard_zps[ub])
if '4SHOOTER2' in ub:
ind = np.where(data['band'] == ub)
data['zp'][ind] = data['zp'][ind] - float(FourShooter_zps[ub])
if 'KEPLERCAM' in ub:
ind = np.where(data['band'] == ub)
data['zp'][ind] = data['zp'][ind] - float(keplercam_zps[ub])
if 'swope' in ub.lower():
ind = np.where(data['band'] == ub)
data['zp'][ind] = data['zp'][ind] - float(swope_zps[ub])
if 'sdss' in ub.lower():
ind = np.where(data['band'] == ub)
data['zp'][ind] = data['zp'][ind] - float(sdss_zps[ub])
print ub, bp.wave_eff, rest
for nfb in nofit_bands:
mask = np.array(data['band'] != nfb)
data = sncosmo.select_data(data,mask)
mwebv = data.meta['MWEBV']
dust = sncosmo.CCM89Dust()
if args.emcee:
if not os.path.exists('./plots/emcee/cadencesim'):
os.makedirs('./plots/emcee/cadencesim')
if not os.path.exists('./plots/emcee/cadencesim/triangle'):
os.makedirs('./plots/emcee/cadencesim/triangle')
if not os.path.exists('./plots/emcee/cadencesim/salt'):
os.makedirs('./plots/emcee/cadencesim/salt')
# make the 2stretch source, apply dust, set it to the right z
source = Salt2XSource(version='2.4', modeldir=modeldir)
model = sncosmo.Model(source=source, effects=[dust], effect_names=['mw'], effect_frames=['obs'])
SaltSource = sncosmo.SALT2Source(version='2.4', modeldir=modeldir)
SaltModel = sncosmo.Model(source=SaltSource, effects=[dust], effect_names=['mw'], effect_frames=['obs'])
model.set(z=z, mwebv=float(mwebv), mwr_v=3.1)
SaltModel.set(z=z, mwebv=float(mwebv), mwr_v=3.1)
if args.noskew:
emfit = fitters.emcee_salt_fit_noskew(data,model,SaltModel)
else:
emfit = fitters.emcee_salt_fit(data,model,SaltModel)
try:
try:
cov,res = emfit.normal_salt_fit(nickname)
data = emfit.data
invcov = emfit.invcov
except Exception:
traceback.print_exc()
print 'normal_salt_fit failed exception'
continue
nsamp = args.nsamp
t0 = time.time()
print 'sampling %i times...' %(nsamp*emfit.nwalkers)
fit = emfit.run(nsamples=nsamp)
print 'time %s samples: %s' %(nsamp*emfit.nwalkers, time.time()-t0)
# make the chains into a dictionary keyed by param name
chains = emfit.chain_dict_x0(fit)
# occasionally samples go into x0 < 0. Rather than restricting
# at the likelihood level, we sample longer until we have
# enough valid chains
# in this final implementation of the code I don't think
# the while loop is ever actually entered for JLA
ind = np.where(~np.isnan(chains['mB']))
print 'good samples: %s' %len(chains['mB'][ind])
if len(chains['mB'][ind]) >= args.nsamp*emfit.nwalkers:
keep_going = False
else:
keep_going = True
ctr = 0
while keep_going:
ctr += 1
print 'sampling %s times...' % (args.nsamp * emfit.nwalkers)
fit = emfit.keep_going(fit, float(mwebv), nsamples=nsamp)
print 'time %s samples: %s' %(nsamp*emfit.nwalkers, time.time()-t0)
chains = emfit.chain_dict_x0(fit)
ind = np.where(~np.isnan(chains['mB']))
print 'good samples: %s' %len(chains['mB'][ind])
if len(chains['mB'][ind]) >= args.nsamp*emfit.nwalkers:
keep_going = False
if ctr >= 5:
print 'Sampled 5 times. Just stopping'
print 'good samples: %s' %len(chains['mB'][ind])
keep_going = False
print 'finally done sampling!'
# these are just crude errors to be printed in the LC plots
err = {}
for k in chains.keys():
e = np.percentile(chains[k], [16,84])
err[k] = 0.5*(e[1]-e[0])
# using the maximum likelihood sample just for plotting purposes
maxlike = np.argmax(chains['lnprob'])
model.set(t0=chains['t0'][maxlike], x1=chains['x1'][maxlike], s=chains['s'][maxlike], c=chains['c'][maxlike], x0=chains['x0'][maxlike], z=z, mwebv=float(mwebv))
# the errors passed in here are errors in the measured parameters. Best-fit taken from maximum likelihood sample
sncosmo.plot_lc(emfit.data, model=model, errors=err, fname='./plots/emcee/cadencesim/%s.pdf' %(nickname),color='black')
triangle_keys = ['mB', 'c', 't0', 'x1']
helpers.save(chains, './chains/%s.chains' %(nickname))
# triangle plots
emfit.plots(chains, nickname, triangle_keys, outdir='./plots/emcee/cadencesim/triangle')
except Exception as e:
# as of final release of the code no JLA SNe fail the try except:
# occasionally a simulated LC will fail, usually due to poor S/N
traceback.print_exc()
continue
# output lcfit file
outdir = os.path.abspath('./fit_results/emcee/cadencesim/%s' %(nickname))
if not os.path.exists(outdir):
os.makedirs(outdir)
if args.noskew:
params = [chains['x0'][maxlike],chains['x1'][maxlike],chains['c'][maxlike],chains['t0'][maxlike]]
else:
params = [chains['x0'][maxlike],chains['x1'][maxlike],chains['s'][maxlike],chains['c'][maxlike],chains['t0'][maxlike]]
chisq = emfit.chi2(params)
dof = len(emfit.data['flux'])
print chisq, dof
# calculate mB
# dumps statistics from the chains to a file, though the standardization code uses the chain files directly
helpers.dump_emcee_results(chains, outdir, nickname, z, chisq, dof, survey)

--- moonv4/moon_manager/moon_manager/api/data.py | hashnfv/hashnfv-moon @ daaba34fa2ed4426bc0fde359e54a5e1b872208c | Python | 12,010 bytes | Apache-2.0 | blob 85faf415426e576379087f5bd21d095c307a1f7c ---

# Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
# This software is distributed under the terms and conditions of the 'Apache-2.0'
# license which can be found in the file 'LICENSE' in this package distribution
# or at 'http://www.apache.org/licenses/LICENSE-2.0'.
"""
Data are elements used to create rules
"""
from flask import request
from flask_restful import Resource
from oslo_log import log as logging
from moon_utilities.security_functions import check_auth
from moon_db.core import PolicyManager
__version__ = "0.2.0"
LOG = logging.getLogger("moon.manager.api." + __name__)
class SubjectData(Resource):
"""
Endpoint for subject data requests
"""
__urls__ = (
"/policies/<string:uuid>/subject_data",
"/policies/<string:uuid>/subject_data/",
"/policies/<string:uuid>/subject_data/<string:category_id>",
"/policies/<string:uuid>/subject_data/<string:category_id>/"
"<string:data_id>",
)
@check_auth
def get(self, uuid=None, category_id=None, data_id=None, user_id=None):
"""Retrieve all subject categories or a specific one if sid is given
for a given policy
:param uuid: uuid of the policy
:param category_id: uuid of the subject category
:param data_id: uuid of the subject data
:param user_id: user ID of the requester
:return: [{
"policy_id": "policy_id1",
"category_id": "category_id1",
"data": {
"subject_data_id": {
"name": "name of the data",
"description": "description of the data"
}
}
}]
:internal_api: get_subject_data
"""
try:
data = PolicyManager.get_subject_data(user_id=user_id,
policy_id=uuid,
category_id=category_id,
data_id=data_id)
except Exception as e:
LOG.error(e, exc_info=True)
return {"result": False,
"error": str(e)}, 500
return {"subject_data": data}
@check_auth
def post(self, uuid=None, category_id=None, data_id=None, user_id=None):
"""Create or update a subject.
:param uuid: uuid of the policy
:param category_id: uuid of the subject category
:param data_id: uuid of the subject data
:param user_id: user ID of the requester
:request body: {
"name": "name of the data",
"description": "description of the data"
}
:return: {
"policy_id": "policy_id1",
"category_id": "category_id1",
"data": {
"subject_data_id": {
"name": "name of the data",
"description": "description of the data"
}
}
}
:internal_api: add_subject_data
"""
try:
data = PolicyManager.set_subject_data(user_id=user_id,
policy_id=uuid,
category_id=category_id,
value=request.json)
except Exception as e:
LOG.error(e, exc_info=True)
return {"result": False,
"error": str(e)}, 500
return {"subject_data": data}
@check_auth
def delete(self, uuid=None, category_id=None, data_id=None, user_id=None):
"""Delete a subject for a given policy
:param uuid: uuid of the policy
:param category_id: uuid of the subject category
:param data_id: uuid of the subject data
:param user_id: user ID of the requester
:return: [{
"result": "True or False",
"message": "optional message"
}]
:internal_api: delete_subject_data
"""
try:
data = PolicyManager.delete_subject_data(user_id=user_id,
policy_id=uuid,
data_id=data_id)
except Exception as e:
LOG.error(e, exc_info=True)
return {"result": False,
"error": str(e)}, 500
return {"result": True}
class ObjectData(Resource):
"""
Endpoint for object data requests
"""
__urls__ = (
"/policies/<string:uuid>/object_data",
"/policies/<string:uuid>/object_data/",
"/policies/<string:uuid>/object_data/<string:category_id>",
"/policies/<string:uuid>/object_data/<string:category_id>/"
"<string:data_id>",
)
@check_auth
def get(self, uuid=None, category_id=None, data_id=None, user_id=None):
"""Retrieve all object categories or a specific one if sid is given
for a given policy
:param uuid: uuid of the policy
:param category_id: uuid of the object category
:param data_id: uuid of the object data
:param user_id: user ID of the requester
:return: [{
"policy_id": "policy_id1",
"category_id": "category_id1",
"data": {
"object_data_id": {
"name": "name of the data",
"description": "description of the data"
}
}
}]
:internal_api: get_object_data
"""
try:
data = PolicyManager.get_object_data(user_id=user_id,
policy_id=uuid,
category_id=category_id,
data_id=data_id)
except Exception as e:
LOG.error(e, exc_info=True)
return {"result": False,
"error": str(e)}, 500
return {"object_data": data}
@check_auth
def post(self, uuid=None, category_id=None, data_id=None, user_id=None):
"""Create or update a object.
:param uuid: uuid of the policy
:param category_id: uuid of the object category
:param data_id: uuid of the object data
:param user_id: user ID of the requester
:request body: {
"name": "name of the data",
"description": "description of the data"
}
:return: {
"policy_id": "policy_id1",
"category_id": "category_id1",
"data": {
"object_data_id": {
"name": "name of the data",
"description": "description of the data"
}
}
}
:internal_api: add_object_data
"""
try:
data = PolicyManager.add_object_data(user_id=user_id,
policy_id=uuid,
category_id=category_id,
value=request.json)
except Exception as e:
LOG.error(e, exc_info=True)
return {"result": False,
"error": str(e)}, 500
return {"object_data": data}
@check_auth
def delete(self, uuid=None, category_id=None, data_id=None, user_id=None):
"""Delete a object for a given policy
:param uuid: uuid of the policy
:param category_id: uuid of the object category
:param data_id: uuid of the object data
:param user_id: user ID of the requester
:return: {
"result": "True or False",
"message": "optional message"
}
:internal_api: delete_object_data
"""
try:
data = PolicyManager.delete_object_data(user_id=user_id,
policy_id=uuid,
data_id=data_id)
except Exception as e:
LOG.error(e, exc_info=True)
return {"result": False,
"error": str(e)}, 500
return {"result": True}
class ActionData(Resource):
"""
Endpoint for action data requests
"""
__urls__ = (
"/policies/<string:uuid>/action_data",
"/policies/<string:uuid>/action_data/",
"/policies/<string:uuid>/action_data/<string:category_id>",
"/policies/<string:uuid>/action_data/<string:category_id>/"
"<string:data_id>",
)
@check_auth
def get(self, uuid=None, category_id=None, data_id=None, user_id=None):
"""Retrieve all action categories or a specific one if sid is given
for a given policy
:param uuid: uuid of the policy
:param category_id: uuid of the action category
:param data_id: uuid of the action data
:param user_id: user ID of the requester
:return: [{
"policy_id": "policy_id1",
"category_id": "category_id1",
"data": {
"action_data_id": {
"name": "name of the data",
"description": "description of the data"
}
}
}]
:internal_api: get_action_data
"""
try:
data = PolicyManager.get_action_data(user_id=user_id,
policy_id=uuid,
category_id=category_id,
data_id=data_id)
except Exception as e:
LOG.error(e, exc_info=True)
return {"result": False,
"error": str(e)}, 500
return {"action_data": data}
@check_auth
def post(self, uuid=None, category_id=None, data_id=None, user_id=None):
"""Create or update a action.
:param uuid: uuid of the policy
:param category_id: uuid of the action category
:param data_id: uuid of the action data
:param user_id: user ID of the requester
:request body: {
"name": "name of the data",
"description": "description of the data"
}
:return: {
"policy_id": "policy_id1",
"category_id": "category_id1",
"data": {
"action_data_id": {
"name": "name of the data",
"description": "description of the data"
}
}
}
:internal_api: add_action_data
"""
try:
data = PolicyManager.add_action_data(user_id=user_id,
policy_id=uuid,
category_id=category_id,
value=request.json)
except Exception as e:
LOG.error(e, exc_info=True)
return {"result": False,
"error": str(e)}, 500
return {"action_data": data}
@check_auth
def delete(self, uuid=None, category_id=None, data_id=None, user_id=None):
"""Delete a action for a given policy
:param uuid: uuid of the policy
:param category_id: uuid of the action category
:param data_id: uuid of the action data
:param user_id: user ID of the requester
:return: {
"result": "True or False",
"message": "optional message"
}
:internal_api: delete_action_data
"""
try:
data = PolicyManager.delete_action_data(user_id=user_id,
policy_id=uuid,
data_id=data_id)
except Exception as e:
LOG.error(e, exc_info=True)
return {"result": False,
"error": str(e)}, 500
return {"result": True}

--- ml_params_tensorflow/ml_params/losses.py | SamuelMarks/ml-params-tensorflow @ 86fb92147443e69982d05755361b101f8a6f64e5 | Python | 29,474 bytes | Apache-2.0, MIT | blob c82f67c1c5c3550f9b1e45f60f1e7a0cbcc94be8 ---

""" Generated Loss CLI parsers """
def binary_crossentropyConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes the binary crossentropy loss.
Standalone usage:
>>> y_true = [[0, 1], [0, 0]]
>>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
>>> loss = tf.keras.losses.binary_crossentropy(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> loss.numpy()
array([0.916 , 0.714], dtype=float32)
"""
argument_parser.add_argument(
"--y_true",
help="Ground truth values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
argument_parser.add_argument(
"--y_pred",
help="The predicted values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
argument_parser.add_argument(
"--from_logits",
type=bool,
help="""Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a
probability distribution.""",
required=True,
default=False,
)
argument_parser.add_argument(
"--label_smoothing",
type=int,
help="Float in [0, 1]. If > `0` then smooth the labels.",
required=True,
default=0,
)
return (
argument_parser,
"```K.mean(K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1)```",
)
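# Usage sketch for these generated parsers (the argument values below are
# chosen for illustration; only the function above is from this module):
#
#   from argparse import ArgumentParser
#   parser, expr = binary_crossentropyConfig(ArgumentParser("binary_crossentropy"))
#   args = parser.parse_args(["--y_true", "[[0, 1], [0, 0]]",
#                             "--y_pred", "[[0.6, 0.4], [0.4, 0.6]]",
#                             "--from_logits", "", "--label_smoothing", "0"])
#
# note that argparse's type=bool treats any non-empty string as True, so an
# empty string is the only way to pass False for --from_logits here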
def categorical_crossentropyConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes the categorical crossentropy loss.
Standalone usage:
>>> y_true = [[0, 1, 0], [0, 0, 1]]
>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
>>> loss = tf.keras.losses.categorical_crossentropy(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> loss.numpy()
array([0.0513, 2.303], dtype=float32)
"""
argument_parser.add_argument(
"--y_true", help="Tensor of one-hot true targets.", required=True
)
argument_parser.add_argument(
"--y_pred", help="Tensor of predicted targets.", required=True
)
argument_parser.add_argument(
"--from_logits",
type=bool,
help="""Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a
probability distribution.""",
required=True,
default=False,
)
argument_parser.add_argument(
"--label_smoothing",
type=int,
help="Float in [0, 1]. If > `0` then smooth the labels.",
required=True,
default=0,
)
return (
argument_parser,
"```K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)```",
)
def categorical_hingeConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes the categorical hinge loss between `y_true` and `y_pred`.
`loss = maximum(neg - pos + 1, 0)`
where `neg=maximum((1-y_true)*y_pred) and pos=sum(y_true*y_pred)`
Standalone usage:
>>> y_true = np.random.randint(0, 3, size=(2,))
>>> y_true = tf.keras.utils.to_categorical(y_true, num_classes=3)
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.categorical_hinge(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> pos = np.sum(y_true * y_pred, axis=-1)
>>> neg = np.amax((1. - y_true) * y_pred, axis=-1)
>>> assert np.array_equal(loss.numpy(), np.maximum(0., neg - pos + 1.))
"""
argument_parser.add_argument(
"--y_true",
help="The ground truth values. `y_true` values are expected to be 0 or 1.",
required=True,
)
argument_parser.add_argument(
"--y_pred", help="The predicted values.", required=True
)
return argument_parser, "```math_ops.maximum(neg - pos + 1.0, zero)```"
def cosine_similarityConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes the cosine similarity between labels and predictions.
Note that it is a number between -1 and 1. When it is a negative number
between -1 and 0, 0 indicates orthogonality and values closer to -1
indicate greater similarity. The values closer to 1 indicate greater
dissimilarity. This makes it usable as a loss function in a setting
where you try to maximize the proximity between predictions and
targets. If either `y_true` or `y_pred` is a zero vector, cosine
similarity will be 0 regardless of the proximity between predictions
and targets.
`loss = -sum(l2_norm(y_true) * l2_norm(y_pred))`
Standalone usage:
>>> y_true = [[0., 1.], [1., 1.], [1., 1.]]
>>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]
>>> loss = tf.keras.losses.cosine_similarity(y_true, y_pred, axis=1)
>>> loss.numpy()
array([-0., -0.999, 0.999], dtype=float32)
"""
argument_parser.add_argument(
"--y_true", help="Tensor of true targets.", required=True
)
argument_parser.add_argument(
"--y_pred", help="Tensor of predicted targets.", required=True
)
argument_parser.add_argument(
"--axis",
type=int,
help="Axis along which to determine similarity.",
required=True,
default=-1,
)
return (argument_parser, "```(-math_ops.reduce_sum(y_true * y_pred, axis=axis))```")
def hingeConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes the hinge loss between `y_true` and `y_pred`.
`loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)`
Standalone usage:
>>> y_true = np.random.choice([-1, 1], size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.hinge(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> assert np.array_equal(
... loss.numpy(),
... np.mean(np.maximum(1. - y_true * y_pred, 0.), axis=-1))
"""
argument_parser.add_argument(
"--y_true",
help="""The ground truth values. `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided they will be converted to -1 or 1. shape = `[batch_size, d0, .. dN]`.""",
required=True,
)
argument_parser.add_argument(
"--y_pred",
help="The predicted values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
return (
argument_parser,
"```K.mean(math_ops.maximum(1.0 - y_true * y_pred, 0.0), axis=-1)```",
)
def huberConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes Huber loss value.
For each value x in `error = y_true - y_pred`:
```
loss = 0.5 * x^2 if |x| <= d
loss = 0.5 * d^2 + d * (|x| - d) if |x| > d
```
where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss
"""
argument_parser.add_argument(
"--y_true", help="tensor of true targets.", required=True
)
argument_parser.add_argument(
"--y_pred", help="tensor of predicted targets.", required=True
)
argument_parser.add_argument(
"--delta",
type=float,
help="A float, the point where the Huber loss function changes from a quadratic to linear.",
required=True,
default=1.0,
)
return (
argument_parser,
"""```K.mean(array_ops.where_v2(abs_error <= delta, half * math_ops.pow(error, 2),
half * math_ops.pow(delta, 2) + delta * (abs_error - delta)), axis=-1)```""",
)
def kldConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.
`loss = y_true * log(y_true / y_pred)`
See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float64)
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.kullback_leibler_divergence(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> y_true = tf.keras.backend.clip(y_true, 1e-7, 1)
>>> y_pred = tf.keras.backend.clip(y_pred, 1e-7, 1)
>>> assert np.array_equal(
... loss.numpy(), np.sum(y_true * np.log(y_true / y_pred), axis=-1))
"""
argument_parser.add_argument(
"--y_true", help="Tensor of true targets.", required=True
)
argument_parser.add_argument(
"--y_pred", help="Tensor of predicted targets.", required=True
)
return (
argument_parser,
"```math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)```",
)
def kl_divergenceConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.
`loss = y_true * log(y_true / y_pred)`
See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float64)
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.kullback_leibler_divergence(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> y_true = tf.keras.backend.clip(y_true, 1e-7, 1)
>>> y_pred = tf.keras.backend.clip(y_pred, 1e-7, 1)
>>> assert np.array_equal(
... loss.numpy(), np.sum(y_true * np.log(y_true / y_pred), axis=-1))
"""
argument_parser.add_argument(
"--y_true", help="Tensor of true targets.", required=True
)
argument_parser.add_argument(
"--y_pred", help="Tensor of predicted targets.", required=True
)
return (
argument_parser,
"```math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)```",
)
def kullback_leibler_divergenceConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.
`loss = y_true * log(y_true / y_pred)`
See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float64)
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.kullback_leibler_divergence(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> y_true = tf.keras.backend.clip(y_true, 1e-7, 1)
>>> y_pred = tf.keras.backend.clip(y_pred, 1e-7, 1)
>>> assert np.array_equal(
... loss.numpy(), np.sum(y_true * np.log(y_true / y_pred), axis=-1))
"""
argument_parser.add_argument(
"--y_true", help="Tensor of true targets.", required=True
)
argument_parser.add_argument(
"--y_pred", help="Tensor of predicted targets.", required=True
)
return (
argument_parser,
"```math_ops.reduce_sum(y_true * math_ops.log(y_true / y_pred), axis=-1)```",
)
def logcoshConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Logarithm of the hyperbolic cosine of the prediction error.
`log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly
like the mean squared error, but will not be so strongly affected by the
occasional wildly incorrect prediction.
Standalone usage:
>>> y_true = np.random.random(size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.logcosh(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> x = y_pred - y_true
>>> assert np.allclose(
... loss.numpy(),
... np.mean(x + np.log(np.exp(-2. * x) + 1.) - math_ops.log(2.), axis=-1),
... atol=1e-5)
"""
argument_parser.add_argument(
"--y_true",
help="Ground truth values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
argument_parser.add_argument(
"--y_pred",
help="The predicted values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
return argument_parser, "```K.mean(_logcosh(y_pred - y_true), axis=-1)```"
def maeConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes the mean absolute error between labels and predictions.
`loss = mean(abs(y_true - y_pred), axis=-1)`
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.mean_absolute_error(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> assert np.array_equal(
... loss.numpy(), np.mean(np.abs(y_true - y_pred), axis=-1))
"""
argument_parser.add_argument(
"--y_true",
help="Ground truth values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
argument_parser.add_argument(
"--y_pred",
help="The predicted values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
return (argument_parser, "```K.mean(math_ops.abs(y_pred - y_true), axis=-1)```")
def mapeConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes the mean absolute percentage error between `y_true` and `y_pred`.
`loss = 100 * mean(abs((y_true - y_pred) / y_true), axis=-1)`
Standalone usage:
>>> y_true = np.random.random(size=(2, 3))
>>> y_true = np.maximum(y_true, 1e-7) # Prevent division by zero
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.mean_absolute_percentage_error(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> assert np.array_equal(
... loss.numpy(),
... 100. * np.mean(np.abs((y_true - y_pred) / y_true), axis=-1))
"""
argument_parser.add_argument(
"--y_true",
help="Ground truth values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
argument_parser.add_argument(
"--y_pred",
help="The predicted values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
return argument_parser, "```(100.0 * K.mean(diff, axis=-1))```"
def mean_absolute_errorConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes the mean absolute error between labels and predictions.
`loss = mean(abs(y_true - y_pred), axis=-1)`
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.mean_absolute_error(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> assert np.array_equal(
... loss.numpy(), np.mean(np.abs(y_true - y_pred), axis=-1))
"""
argument_parser.add_argument(
"--y_true",
help="Ground truth values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
argument_parser.add_argument(
"--y_pred",
help="The predicted values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
return (argument_parser, "```K.mean(math_ops.abs(y_pred - y_true), axis=-1)```")
def mean_absolute_percentage_errorConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes the mean absolute percentage error between `y_true` and `y_pred`.
`loss = 100 * mean(abs((y_true - y_pred) / y_true), axis=-1)`
Standalone usage:
>>> y_true = np.random.random(size=(2, 3))
>>> y_true = np.maximum(y_true, 1e-7) # Prevent division by zero
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.mean_absolute_percentage_error(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> assert np.array_equal(
... loss.numpy(),
... 100. * np.mean(np.abs((y_true - y_pred) / y_true), axis=-1))
"""
argument_parser.add_argument(
"--y_true",
help="Ground truth values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
argument_parser.add_argument(
"--y_pred",
help="The predicted values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
return argument_parser, "```(100.0 * K.mean(diff, axis=-1))```"
def mean_squared_errorConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes the mean squared error between labels and predictions.
After computing the squared distance between the inputs, the mean value over
the last dimension is returned.
`loss = mean(square(y_true - y_pred), axis=-1)`
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.mean_squared_error(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> assert np.array_equal(
... loss.numpy(), np.mean(np.square(y_true - y_pred), axis=-1))
"""
argument_parser.add_argument(
"--y_true",
help="Ground truth values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
argument_parser.add_argument(
"--y_pred",
help="The predicted values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
return (
argument_parser,
"```K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1)```",
)
def mean_squared_logarithmic_errorConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes the mean squared logarithmic error between `y_true` and `y_pred`.
`loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1)`
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.mean_squared_logarithmic_error(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> y_true = np.maximum(y_true, 1e-7)
>>> y_pred = np.maximum(y_pred, 1e-7)
>>> assert np.allclose(
... loss.numpy(),
... np.mean(
... np.square(np.log(y_true + 1.) - np.log(y_pred + 1.)), axis=-1))
"""
argument_parser.add_argument(
"--y_true",
help="Ground truth values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
argument_parser.add_argument(
"--y_pred",
help="The predicted values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
return (
argument_parser,
"```K.mean(math_ops.squared_difference(first_log, second_log), axis=-1)```",
)
def mseConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes the mean squared error between labels and predictions.
After computing the squared distance between the inputs, the mean value over
the last dimension is returned.
`loss = mean(square(y_true - y_pred), axis=-1)`
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.mean_squared_error(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> assert np.array_equal(
... loss.numpy(), np.mean(np.square(y_true - y_pred), axis=-1))
"""
argument_parser.add_argument(
"--y_true",
help="Ground truth values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
argument_parser.add_argument(
"--y_pred",
help="The predicted values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
return (
argument_parser,
"```K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1)```",
)
def msleConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes the mean squared logarithmic error between `y_true` and `y_pred`.
`loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1)`
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.mean_squared_logarithmic_error(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> y_true = np.maximum(y_true, 1e-7)
>>> y_pred = np.maximum(y_pred, 1e-7)
>>> assert np.allclose(
... loss.numpy(),
... np.mean(
... np.square(np.log(y_true + 1.) - np.log(y_pred + 1.)), axis=-1))
"""
argument_parser.add_argument(
"--y_true",
help="Ground truth values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
argument_parser.add_argument(
"--y_pred",
help="The predicted values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
return (
argument_parser,
"```K.mean(math_ops.squared_difference(first_log, second_log), axis=-1)```",
)
def poissonConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Computes the Poisson loss between y_true and y_pred.
The Poisson loss is the mean of the elements of the `Tensor`
`y_pred - y_true * log(y_pred)`.
Standalone usage:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.poisson(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> y_pred = y_pred + 1e-7
>>> assert np.allclose(
... loss.numpy(), np.mean(y_pred - y_true * np.log(y_pred), axis=-1),
... atol=1e-5)
"""
argument_parser.add_argument(
"--y_true",
help="Ground truth values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
argument_parser.add_argument(
"--y_pred",
help="The predicted values. shape = `[batch_size, d0, .. dN]`.",
required=True,
)
return (
argument_parser,
"```K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1)```",
)
def ReductionConfig(argument_parser):
"""
Set CLI arguments
:param argument_parser: argument parser
:type argument_parser: ```ArgumentParser```
:returns: argument_parser
:rtype: ```ArgumentParser```
"""
argument_parser.description = """Types of loss reduction.
Contains the following values:
* `AUTO`: Indicates that the reduction option will be determined by the usage
context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When
used with `tf.distribute.Strategy`, outside of built-in training loops such
as `tf.keras` `compile` and `fit`, we expect reduction value to be
`SUM` or `NONE`. Using `AUTO` in that case will raise an error.
* `NONE`: Weighted losses with one dimension reduced (axis=-1, or axis
specified by the loss function). When this reduction type is used with built-in
Keras training loops like `fit`/`evaluate`, the unreduced vector loss is
passed to the optimizer but the reported loss will be a scalar value.
* `SUM`: Scalar sum of weighted losses.
* `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in losses.
This reduction type is not supported when used with
`tf.distribute.Strategy` outside of built-in training loops like `tf.keras`
`compile`/`fit`.
You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like:
```
with strategy.scope():
loss_obj = tf.keras.losses.CategoricalCrossentropy(
reduction=tf.keras.losses.Reduction.NONE)
....
loss = tf.reduce_sum(loss_obj(labels, predictions)) *
(1. / global_batch_size)
```
Please see the
[custom training guide](https://www.tensorflow.org/tutorials/distribute/custom_training) # pylint: disable=line-too-long
for more details on this."""
argument_parser.add_argument("--AUTO", required=True, default="auto")
argument_parser.add_argument("--NONE", required=True, default="none")
argument_parser.add_argument("--SUM", required=True, default="sum")
argument_parser.add_argument(
"--SUM_OVER_BATCH_SIZE", required=True, default="sum_over_batch_size"
)
return argument_parser
def sparse_categorical_crossentropyConfig(argument_parser):
    """
    Set CLI arguments

    :param argument_parser: argument parser
    :type argument_parser: ```ArgumentParser```

    :returns: argument_parser
    :rtype: ```ArgumentParser```
    """
    argument_parser.description = """Computes the sparse categorical crossentropy loss.
Standalone usage:
>>> y_true = [1, 2]
>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
>>> loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> loss.numpy()
array([0.0513, 2.303], dtype=float32)
"""
    argument_parser.add_argument("--y_true", help="Ground truth values.", required=True)
    argument_parser.add_argument(
        "--y_pred", help="The predicted values.", required=True
    )
    argument_parser.add_argument(
        "--from_logits",
        type=bool,
        help="""Whether `y_pred` is expected to be a logits tensor. By default, we assume that `y_pred` encodes a
probability distribution.""",
        required=True,
        default=False,
    )
    argument_parser.add_argument(
        "--axis",
        type=int,
        help="(Optional) Defaults to -1. The dimension along which the entropy is computed.",
        default=-1,
    )
    return argument_parser, "```None```"
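
# Hand-check of the doctest values above with plain NumPy (a sketch, not
# the Keras implementation): for integer labels the per-example loss is
# -log(p[label]); `eps` is an assumed clipping constant.
if __name__ == "__main__":
    import numpy as np

    y_true = [1, 2]
    y_pred = np.array([[0.05, 0.95, 0.0], [0.1, 0.8, 0.1]])
    eps = 1e-7  # assumed clip, mirroring a Keras-style epsilon
    losses = [-np.log(max(p[t], eps)) for p, t in zip(y_pred, y_true)]
    print(np.round(losses, 4))  # [0.0513 2.3026]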
def squared_hingeConfig(argument_parser):
    """
    Set CLI arguments

    :param argument_parser: argument parser
    :type argument_parser: ```ArgumentParser```

    :returns: argument_parser
    :rtype: ```ArgumentParser```
    """
    argument_parser.description = """Computes the squared hinge loss between `y_true` and `y_pred`.
`loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)`
Standalone usage:
>>> y_true = np.random.choice([-1, 1], size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = tf.keras.losses.squared_hinge(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> assert np.array_equal(
...     loss.numpy(),
...     np.mean(np.square(np.maximum(1. - y_true * y_pred, 0.)), axis=-1))
"""
    argument_parser.add_argument(
        "--y_true",
        help="""The ground truth values. `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
provided we will convert them to -1 or 1. shape = `[batch_size, d0, .. dN]`.""",
        required=True,
    )
    argument_parser.add_argument(
        "--y_pred",
        help="The predicted values. shape = `[batch_size, d0, .. dN]`.",
        required=True,
    )
    return (
        argument_parser,
        "```K.mean(math_ops.square(math_ops.maximum(1.0 - y_true * y_pred, 0.0)), axis=-1)```",
    )
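
# One worked instance of the squared-hinge formula above (a sketch with
# assumed values): for y_true=1, y_pred=0.3 the term is
# max(1 - 1*0.3, 0)^2 = 0.49; for y_true=-1, y_pred=0.4 it is 1.4^2 = 1.96.
if __name__ == "__main__":
    import numpy as np

    y_true = np.array([1.0, -1.0])
    y_pred = np.array([0.3, 0.4])
    print(np.mean(np.square(np.maximum(1.0 - y_true * y_pred, 0.0))))  # 1.225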
__all__ = [
    "binary_crossentropyConfig",
    "categorical_crossentropyConfig",
    "categorical_hingeConfig",
    "cosine_similarityConfig",
    "hingeConfig",
    "huberConfig",
    "kldConfig",
    "kl_divergenceConfig",
    "kullback_leibler_divergenceConfig",
    "logcoshConfig",
    "maeConfig",
    "mapeConfig",
    "mean_absolute_errorConfig",
    "mean_absolute_percentage_errorConfig",
    "mean_squared_errorConfig",
    "mean_squared_logarithmic_errorConfig",
    "mseConfig",
    "msleConfig",
    "poissonConfig",
    "ReductionConfig",
    "sparse_categorical_crossentropyConfig",
    "squared_hingeConfig",
]
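
# Assumed usage sketch for the *Config helpers exported above (the wiring
# convention is inferred; `poissonConfig` is the function defined earlier
# in this module):
if __name__ == "__main__":
    from argparse import ArgumentParser

    parser = ArgumentParser(prog="poisson")
    parser, default_expr = poissonConfig(parser)
    args = parser.parse_args(["--y_true", "[[0, 1]]", "--y_pred", "[[0.4, 0.6]]"])
    print(args.y_true, args.y_pred, default_expr)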
| 31.156448 | 121 | 0.640972 | 3,913 | 29,474 | 4.641452 | 0.081012 | 0.160335 | 0.01817 | 0.030283 | 0.825625 | 0.81208 | 0.79358 | 0.785486 | 0.780311 | 0.768087 | 0 | 0.018824 | 0.199735 | 29,474 | 945 | 122 | 31.189418 | 0.751177 | 0.119563 | 0 | 0.644444 | 1 | 0.065079 | 0.676575 | 0.129139 | 0 | 0 | 0 | 0 | 0.055556 | 1 | 0.034921 | false | 0.001587 | 0 | 0 | 0.069841 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
| c078d61c3c5d1d1de714766d3635970a8aad24be | 141 | py | Python | kubespray_commands/commands/__init__.py | Magnitus-/server-setup-scripts | 0c2537498132e4961d104dfbe828973b96c6cc14 | ["MIT"] | null | null | null | kubespray_commands/commands/__init__.py | Magnitus-/server-setup-scripts | 0c2537498132e4961d104dfbe828973b96c6cc14 | ["MIT"] | null | null | null | kubespray_commands/commands/__init__.py | Magnitus-/server-setup-scripts | 0c2537498132e4961d104dfbe828973b96c6cc14 | ["MIT"] | null | null | null |
import click

from .generate_inventory_cmd import generate_inventory


@click.group()
def cli():
    pass


cli.add_command(generate_inventory)
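
# Assumed direct entry point for the group above (a sketch; the real
# project may instead expose `cli` via a console_scripts entry):
if __name__ == "__main__":
    cli()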
| 15.666667 | 54 | 0.801418 | 19 | 141 | 5.684211 | 0.631579 | 0.472222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.120567 | 141 | 9 | 55 | 15.666667 | 0.870968 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | true | 0.166667 | 0.333333 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 7 |
| c08937643c4c67d09e90b0076827b645d66cec8f | 5,959 | py | Python | streamselect/repository/test_repository.py | BenHals/streamselect | ca5e80f3a8a31a38ac52bccfd92528d73f387a6a | ["BSD-3-Clause"] | null | null | null | streamselect/repository/test_repository.py | BenHals/streamselect | ca5e80f3a8a31a38ac52bccfd92528d73f387a6a | ["BSD-3-Clause"] | null | null | null | streamselect/repository/test_repository.py | BenHals/streamselect | ca5e80f3a8a31a38ac52bccfd92528d73f387a6a | ["BSD-3-Clause"] | null | null | null |
from river import synth
from river.tree import HoeffdingTreeClassifier
from streamselect.concept_representations import ErrorRateRepresentation
from streamselect.repository import Repository
from streamselect.utils import Observation
def test_step_states() -> None:
    """Test step_all statistics."""
    # pylint: disable="too-many-statements"
    repo = Repository(
        classifier_constructor=HoeffdingTreeClassifier,
        representation_constructor=lambda state_id: ErrorRateRepresentation(1, state_id),
    )
    steps = [10, 5, 20]

    s1 = repo.add_next_state()
    active_id = s1.state_id
    assert len(repo.states) == 1
    assert repo.states[s1.state_id] is s1
    assert s1.active_seen_weight == 0
    for _ in range(steps[0]):
        repo.step_all(active_id)
    assert len(repo.states) == 1
    assert repo.states[s1.state_id] is s1
    assert s1.active_seen_weight == steps[0]
    assert s1.seen_weight == steps[0]
    assert s1.weight_since_last_active == 0

    s2 = repo.add_next_state()
    active_id = s2.state_id
    assert len(repo.states) == 2
    assert repo.states[s2.state_id] is s2
    assert s2.active_seen_weight == 0
    for _ in range(steps[1]):
        repo.step_all(active_id)
    assert len(repo.states) == 2
    assert repo.states[s2.state_id] is s2
    assert s1.active_seen_weight == steps[0]
    assert s1.seen_weight == steps[0] + steps[1]
    assert s1.weight_since_last_active == steps[1]
    assert s2.active_seen_weight == steps[1]
    assert s2.seen_weight == steps[1]
    assert s2.weight_since_last_active == 0

    s3 = repo.add_next_state()
    active_id = s3.state_id
    assert len(repo.states) == 3
    assert repo.states[s3.state_id] is s3
    assert s3.active_seen_weight == 0
    for _ in range(steps[2]):
        repo.step_all(active_id)
    assert len(repo.states) == 3
    assert repo.states[s3.state_id] is s3
    assert s1.active_seen_weight == steps[0]
    assert s1.seen_weight == steps[0] + steps[1] + steps[2]
    assert s1.weight_since_last_active == steps[1] + steps[2]
    assert s2.active_seen_weight == steps[1]
    assert s2.seen_weight == steps[1] + steps[2]
    assert s2.weight_since_last_active == steps[2]
    assert s3.active_seen_weight == steps[2]
    assert s3.seen_weight == steps[2]
    assert s3.weight_since_last_active == 0

    active_id = s1.state_id
    for _ in range(steps[0]):
        repo.step_all(active_id)
    assert len(repo.states) == 3
    assert repo.states[s1.state_id] is s1
    assert s1.active_seen_weight == 2 * steps[0]
    assert s1.seen_weight == 2 * steps[0] + steps[1] + steps[2]
    assert s1.weight_since_last_active == 0
    assert s2.active_seen_weight == steps[1]
    assert s2.seen_weight == steps[0] + steps[1] + steps[2]
    assert s2.weight_since_last_active == steps[2] + steps[0]
    assert s3.active_seen_weight == steps[2]
    assert s3.seen_weight == steps[2] + steps[0]
    assert s3.weight_since_last_active == steps[0]

    assert len(repo.states) == 3
    assert repo.states[s1.state_id] is s1
    assert repo.states[s2.state_id] is s2
    assert repo.states[s3.state_id] is s3
    assert len(repo.base_transitions.adjacency_list) == 0


def test_state_predictions_active() -> None:
    """Test predictions in active mode."""
    # pylint: disable="too-many-statements"
    repo = Repository(
        classifier_constructor=HoeffdingTreeClassifier,
        representation_constructor=lambda state_id: ErrorRateRepresentation(1, state_id),
    )
    dataset = synth.STAGGER()
    s1 = repo.add_next_state()
    active_id = s1.state_id
    s1_test_classifier = HoeffdingTreeClassifier()
    for t, (x, y) in enumerate(dataset.take(25)):
        ob = Observation(x, y, t, active_id)
        state_p = repo.get_repository_predictions(ob, "active")
        pt = s1_test_classifier.predict_one(x)
        assert state_p[active_id] == pt
        repo.states[active_id].learn_one(ob)
        s1_test_classifier.learn_one(x, y)

    s2 = repo.add_next_state()
    active_id = s2.state_id
    s2_test_classifier = HoeffdingTreeClassifier()
    for t, (x, y) in enumerate(dataset.take(25), start=25):
        ob = Observation(x, y, t, active_id)
        state_p = repo.get_repository_predictions(ob, "active")
        print(state_p)
        assert len(state_p) == 1
        pt_1 = s1_test_classifier.predict_one(x)
        pt_2 = s2_test_classifier.predict_one(x)
        assert state_p[active_id] == pt_2
        assert repo.states[s1.state_id].predict_one(ob) == pt_1
        repo.states[active_id].learn_one(ob)
        s2_test_classifier.learn_one(x, y)


def test_state_predictions_all() -> None:
    """Test predictions in all mode."""
    # pylint: disable="too-many-statements"
    repo = Repository(
        classifier_constructor=HoeffdingTreeClassifier,
        representation_constructor=lambda state_id: ErrorRateRepresentation(1, state_id),
    )
    dataset = synth.STAGGER()
    s1 = repo.add_next_state()
    active_id = s1.state_id
    s1_test_classifier = HoeffdingTreeClassifier()
    for t, (x, y) in enumerate(dataset.take(25)):
        ob = Observation(x, y, t, active_id)
        state_p = repo.get_repository_predictions(ob, "all")
        pt = s1_test_classifier.predict_one(x)
        assert state_p[active_id] == pt
        repo.states[active_id].learn_one(ob)
        s1_test_classifier.learn_one(x, y)

    s2 = repo.add_next_state()
    active_id = s2.state_id
    s2_test_classifier = HoeffdingTreeClassifier()
    for t, (x, y) in enumerate(dataset.take(25), start=25):
        ob = Observation(x, y, t, active_id)
        state_p = repo.get_repository_predictions(ob, "all")
        print(state_p)
        assert len(state_p) == 2
        pt_1 = s1_test_classifier.predict_one(x)
        pt_2 = s2_test_classifier.predict_one(x)
        assert state_p[active_id] == pt_2
        assert state_p[s1.state_id] == pt_1
        repo.states[active_id].learn_one(ob)
        s2_test_classifier.learn_one(x, y)
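
# A minimal reference model of the bookkeeping asserted above (a sketch,
# not streamselect's implementation): step_all(active_id) advances every
# known state's seen_weight, bumps active_seen_weight only for the active
# state, and keeps that state's weight_since_last_active at zero.
class _StateStatsSketch:
    def __init__(self):
        self.seen_weight = 0
        self.active_seen_weight = 0
        self.weight_since_last_active = 0


def _step_all_sketch(states, active_id):
    for state_id, stats in states.items():
        stats.seen_weight += 1
        if state_id == active_id:
            stats.active_seen_weight += 1
            stats.weight_since_last_active = 0
        else:
            stats.weight_since_last_active += 1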
| 38.694805 | 89 | 0.683672 | 872 | 5,959 | 4.417431 | 0.090596 | 0.047248 | 0.062305 | 0.049065 | 0.886033 | 0.878245 | 0.832295 | 0.810748 | 0.783229 | 0.763499 | 0 | 0.035336 | 0.206914 | 5,959 | 153 | 90 | 38.947712 | 0.779729 | 0.03373 | 0 | 0.696296 | 0 | 0 | 0.003136 | 0 | 0 | 0 | 0 | 0 | 0.422222 | 1 | 0.022222 | false | 0 | 0.037037 | 0 | 0.059259 | 0.014815 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
| c093436d90063c85ab58a7aaf6082c8a80615a75 | 136 | py | Python | tutorials/1.SimpleExamples/SimpleExample6/__init__.py | dominic-dev/pyformsd | 23e31ceff2943bc0f7286d25dd14450a14b986af | ["MIT"] | null | null | null | tutorials/1.SimpleExamples/SimpleExample6/__init__.py | dominic-dev/pyformsd | 23e31ceff2943bc0f7286d25dd14450a14b986af | ["MIT"] | null | null | null | tutorials/1.SimpleExamples/SimpleExample6/__init__.py | dominic-dev/pyformsd | 23e31ceff2943bc0f7286d25dd14450a14b986af | ["MIT"] | null | null | null |
from pyforms import BaseWidget
from pyforms.Controls import ControlText
from pyforms.Controls import ControlButton
import pyforms
| 22.666667 | 43 | 0.838235 | 16 | 136 | 7.125 | 0.4375 | 0.289474 | 0.333333 | 0.438596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.147059 | 136 | 6 | 44 | 22.666667 | 0.982759 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
| c098b09d05bb2ec4ca3b64f8c7bae7d8977fdb02 | 595 | py | Python | task03/gen.py | rebryk/SPbAU-Speech-Recognition | 8b1993d17d223f507f4e80154823a075e713ee52 | ["MIT"] | 1 | 2019-04-22T14:10:46.000Z | 2019-04-22T14:10:46.000Z | task03/gen.py | rebryk/SPbAU-Speech-Recognition | 8b1993d17d223f507f4e80154823a075e713ee52 | ["MIT"] | 15 | 2020-01-28T22:25:14.000Z | 2022-03-11T23:24:04.000Z | task03/gen.py | rebryk/SPbAU-Speech-Recognition | 8b1993d17d223f507f4e80154823a075e713ee52 | ["MIT"] | 1 | 2019-04-22T14:01:21.000Z | 2019-04-22T14:01:21.000Z |
if __name__ == '__main__':
    with open('train.csv', 'w') as f:
        for i in range(1, 451):
            f.write(f'/workspace/data/VCTK-Corpus/wav48/p239/p239_{i:03d}.wav,/workspace/data/VCTK-Corpus/txt/p239/p239_{i:03d}.txt\n')

    with open('val.csv', 'w') as f:
        for i in range(451, 476):
            f.write(f'/workspace/data/VCTK-Corpus/wav48/p239/p239_{i:03d}.wav,/workspace/data/VCTK-Corpus/txt/p239/p239_{i:03d}.txt\n')

    with open('test.csv', 'w') as f:
        for i in range(476, 504):
            f.write(f'/workspace/data/VCTK-Corpus/wav48/p239/p239_{i:03d}.wav,/workspace/data/VCTK-Corpus/txt/p239/p239_{i:03d}.txt\n')
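
    # Equivalent loop-driven form of the three blocks above (a sketch; it
    # assumes the same VCTK-Corpus layout and speaker p239):
    splits = {'train.csv': range(1, 451), 'val.csv': range(451, 476), 'test.csv': range(476, 504)}
    base = '/workspace/data/VCTK-Corpus'
    for name, ids in splits.items():
        with open(name, 'w') as f:
            for i in ids:
                f.write(f'{base}/wav48/p239/p239_{i:03d}.wav,{base}/txt/p239/p239_{i:03d}.txt\n')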
| 42.5 | 126 | 0.682353 | 114 | 595 | 3.438596 | 0.27193 | 0.19898 | 0.260204 | 0.352041 | 0.882653 | 0.882653 | 0.882653 | 0.882653 | 0.744898 | 0.744898 | 0 | 0.130354 | 0.097479 | 595 | 13 | 127 | 45.769231 | 0.599628 | 0 | 0 | 0.3 | 0 | 0.3 | 0.619529 | 0.560606 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 |
| c0c45c4fed129bc5f3d5c76152fd908781f3537c | 2,485 | py | Python | src/Python/Test signals and benchmarks/plot.py | Bojan-Lukic/master-thesis-signal-segmentation | 8c74fb3c923a5c6e7797985f744e1e99a5236dbd | ["MIT"] | null | null | null | src/Python/Test signals and benchmarks/plot.py | Bojan-Lukic/master-thesis-signal-segmentation | 8c74fb3c923a5c6e7797985f744e1e99a5236dbd | ["MIT"] | null | null | null | src/Python/Test signals and benchmarks/plot.py | Bojan-Lukic/master-thesis-signal-segmentation | 8c74fb3c923a5c6e7797985f744e1e99a5236dbd | ["MIT"] | null | null | null |
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
def single_plot(A, label_x, label_y, ax=False, color='#1f77b4'):
    calibri = {'fontname': 'Calibri'}
    if ax:  # truthy flag: force integer ticks on the x-axis
        ax = plt.figure(figsize=(12, 8)).gca()
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    else:
        plt.figure(figsize=(12, 8))
    plt.grid()
    plt.plot(A, color=color)
    plt.xlabel(label_x, **calibri, fontsize=18)
    plt.ylabel(label_y, **calibri, fontsize=18)
    plt.yticks(fontsize=14)
    plt.xticks(fontsize=14)


def line_plot(A, label_x, label_y, lines, labels):
    calibri = {'fontname': 'Calibri'}
    plt.figure(figsize=(12, 8))
    plt.grid()
    plt.plot(A, label=labels[0])
    plt.axvline(x=lines[0], color="red", linestyle='dashed', label=labels[1],
                ymin=0.02, ymax=0.98)
    for i in range(1, len(lines)):
        plt.axvline(x=lines[i], color="red", linestyle='dashed', ymin=0.02, ymax=0.98)
    plt.xlabel(label_x, **calibri, fontsize=18)
    plt.ylabel(label_y, **calibri, fontsize=18)
    plt.yticks(fontsize=14)
    plt.xticks(fontsize=14)
    plt.legend(fontsize=12)


def multiplot(results, label_x, label_y, color):
    calibri = {'fontname': 'Calibri'}
    plt.figure(figsize=(12, 8))
    plt.grid()
    if len(color) != len(results):
        for i in range(0, len(results)):
            plt.plot(results[i])
    else:
        for i in range(0, len(results)):
            plt.plot(results[i], color=color[i])
    plt.xlabel(label_x, **calibri, fontsize=18)
    plt.ylabel(label_y, **calibri, fontsize=18)
    plt.yticks(fontsize=14)
    plt.xticks(fontsize=14)


def multiplot_lines(results, label_x, label_y, color, lines, label):
    calibri = {'fontname': 'Calibri'}
    plt.figure(figsize=(12, 8))
    plt.grid()
    if len(color) == len(results):
        for i in range(0, len(results)):
            plt.plot(results[i], color=color[i])
    else:
        for i in range(0, len(results)):
            plt.plot(results[i])
    plt.axvline(x=lines[0], color="red", linestyle='dashed', label=label,
                ymin=0.02, ymax=0.98)
    for i in range(1, len(lines)):
        plt.axvline(x=lines[i], color="red", linestyle='dashed', ymin=0.02, ymax=0.98)
    plt.xlabel(label_x, **calibri, fontsize=18)
    plt.ylabel(label_y, **calibri, fontsize=18)
    plt.yticks(fontsize=14)
    plt.xticks(fontsize=14)
    plt.legend(fontsize=12)
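
# Example usage of the helpers above with synthetic data (assumed shapes;
# not part of the original module):
if __name__ == '__main__':
    import numpy as np

    signal = np.cumsum(np.random.randn(500))
    single_plot(signal, 'Sample', 'Amplitude')
    line_plot(signal, 'Sample', 'Amplitude', lines=[100, 300],
              labels=['signal', 'segment boundary'])
    plt.show()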
| 36.544118 | 111 | 0.604829 | 358 | 2,485 | 4.139665 | 0.175978 | 0.032389 | 0.091768 | 0.107962 | 0.825236 | 0.812416 | 0.757085 | 0.757085 | 0.757085 | 0.757085 | 0 | 0.046499 | 0.229779 | 2,485 | 68 | 112 | 36.544118 | 0.727795 | 0 | 0 | 0.75 | 0 | 0 | 0.041432 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.033333 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
| 8d04b7e0d37cdad4300ccb1c3df3c77f9a86db92 | 138 | py | Python | discord/types/template.py | kuzaku-developers/disnake | 61cc1ad4c2bafd39726a1447c85f7e469e41af10 | ["MIT"] | null | null | null | discord/types/template.py | kuzaku-developers/disnake | 61cc1ad4c2bafd39726a1447c85f7e469e41af10 | ["MIT"] | null | null | null | discord/types/template.py | kuzaku-developers/disnake | 61cc1ad4c2bafd39726a1447c85f7e469e41af10 | ["MIT"] | null | null | null |
from disnake.types.template import *
from disnake.types.template import __dict__ as __original_dict__
locals().update(__original_dict__)
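
# Reading of the shim above (inferred): the wildcard import plus
# locals().update(__original_dict__) copies every attribute of
# disnake.types.template into this module, keeping discord.types.template
# a drop-in alias for the disnake module, dunders included.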
| 27.6 | 64 | 0.84058 | 18 | 138 | 5.666667 | 0.555556 | 0.215686 | 0.313725 | 0.470588 | 0.588235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.086957 | 138 | 4 | 65 | 34.5 | 0.809524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
| 23b4366f7940df91ec1e6bd8c6c6ebb69916fd7b | 389 | py | Python | model/crfasrnn/cil.crfasrnn.R50/leonhard.py | fywalter/TorchSeg | 729eb22d8c5d607466055552fd82e0819d5f29e2 | ["MIT"] | null | null | null | model/crfasrnn/cil.crfasrnn.R50/leonhard.py | fywalter/TorchSeg | 729eb22d8c5d607466055552fd82e0819d5f29e2 | ["MIT"] | null | null | null | model/crfasrnn/cil.crfasrnn.R50/leonhard.py | fywalter/TorchSeg | 729eb22d8c5d607466055552fd82e0819d5f29e2 | ["MIT"] | 2 | 2020-07-31T14:40:49.000Z | 2020-07-31T17:52:30.000Z |
import os

lrs = [1e-3, 1e-5, 1e-7, 1e-9, 1e-11, 1e-13, 1e-15]
for lr in lrs:
    # Build the LSF submission once, then echo and submit it
    cmd = ("bsub -n 4 -W 120:00 -R 'rusage[mem=10000, ngpus_excl_p=1]' "
           "python train.py -d 0 --snapshot_dir log/snapshot_{} --lr_crf {}").format(lr, lr)
    print(cmd)
    os.system(cmd)
| 38.9 | 155 | 0.637532 | 80 | 389 | 2.975 | 0.5 | 0.042017 | 0.05042 | 0.058824 | 0.756303 | 0.756303 | 0.756303 | 0.756303 | 0.756303 | 0.756303 | 0 | 0.131902 | 0.161954 | 389 | 9 | 156 | 43.222222 | 0.59816 | 0 | 0 | 0 | 0 | 0.4 | 0.630491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.2 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
| 23fb6f6280c0c7d9d0c9845edcd4da855abf73a2 | 2,894 | py | Python | Labs/FiniteDifferenceMethod/solution.py | rachelwebb/numerical_computing | e7416b43b97976060f6875fa46c7dca20a9f635f | ["CC-BY-3.0"] | null | null | null | Labs/FiniteDifferenceMethod/solution.py | rachelwebb/numerical_computing | e7416b43b97976060f6875fa46c7dca20a9f635f | ["CC-BY-3.0"] | null | null | null | Labs/FiniteDifferenceMethod/solution.py | rachelwebb/numerical_computing | e7416b43b97976060f6875fa46c7dca20a9f635f | ["CC-BY-3.0"] | 1 | 2020-12-08T01:19:23.000Z | 2020-12-08T01:19:23.000Z |
from __future__ import division

import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import spsolve, cg


def general_secondorder_ode_fd(func, a1, a2, a3, a=0., b=1., alpha=1., beta=3., N=5):
    # A simple finite difference scheme to solve BVPs of the form
    #   a1(x)u''(x) + a2(x)u'(x) + a3(x)u(x) = f(x),  x \in [a, b]
    #   u(a) = alpha
    #   u(b) = beta
    # (Dirichlet boundary conditions)
    #
    # U_0 = alpha, U_1, U_2, ..., U_m, U_{m+1} = beta
    # We use m+1 subintervals, giving m algebraic equations
    m = N - 1
    h = (b - a) / (m + 1.)  # Here we form the diagonals
    D0, Dp, Dm, diags = np.zeros((1, m)), np.zeros((1, m)), np.zeros((1, m)), np.array([0, -1, 1])
    for j in range(1, D0.shape[1]):
        xj = a + j * h
        D0[0, j] = h**2. * a3(xj) - 2. * a1(xj)
        Dp[0, j] = a1(xj) - h * a2(xj) / 2.
        Dm[0, j - 1] = a1(xj) + h * a2(xj) / 2.
    xj = a + 1. * h
    D0[0, 0] = h**2. * a3(xj) - 2. * a1(xj)  # main-diagonal entry for the first interior point
    # Here we create the matrix A
    data = np.concatenate((D0, Dm, Dp), axis=0)  # This stacks up rows
    A = h**(-2.) * spdiags(data, diags, m, m).asformat('csr')
    # Here we create the vector B
    B = np.zeros(m + 2)
    for j in range(2, m):
        B[j] = func(a + j * h)
    xj = a + 1. * h
    B[0], B[1] = alpha, func(xj) - alpha * (a1(xj) * h**(-2.) + a2(xj) * h**(-1) / 2.)
    xj = a + m * h
    B[-1], B[-2] = beta, func(xj) - beta * (a1(xj) * h**(-2.) - a2(xj) * h**(-1) / 2.)
    # Here we solve the equation AX = B and return the result
    B[1:-1] = spsolve(A, B[1:-1])
    return np.linspace(a, b, m + 2), B
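
# The centered-difference stencil the scheme above is built on, checked
# numerically on u(x) = sin(x) (a standalone sketch, not part of the lab):
if __name__ == "__main__":
    h, x0 = 1e-4, 0.7
    u = np.sin
    d2 = (u(x0 - h) - 2. * u(x0) + u(x0 + h)) / h**2.  # ~ u''(x0) = -sin(x0)
    d1 = (u(x0 + h) - u(x0 - h)) / (2. * h)            # ~ u'(x0) = cos(x0)
    print(d2 + np.sin(x0), d1 - np.cos(x0))            # both O(h^2), ~0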
| 34.86747 | 92 | 0.525916 | 599 | 2,894 | 2.507513 | 0.15192 | 0.031957 | 0.026631 | 0.035952 | 0.912117 | 0.898802 | 0.898802 | 0.898802 | 0.882823 | 0.882823 | 0 | 0.071046 | 0.226676 | 2,894 | 82 | 93 | 35.292683 | 0.600089 | 0.625777 | 0 | 0 | 0 | 0 | 0.002956 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.166667 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
| 9b26e7dae6b006d2d9e8a30edc07e668316f5eac | 86 | py | Python | amocrm_asterisk_ng/infrastructure/tracing/__init__.py | iqtek/amocrn_asterisk_ng | 429a8d0823b951c855a49c1d44ab0e05263c54dc | ["MIT"] | null | null | null | amocrm_asterisk_ng/infrastructure/tracing/__init__.py | iqtek/amocrn_asterisk_ng | 429a8d0823b951c855a49c1d44ab0e05263c54dc | ["MIT"] | null | null | null | amocrm_asterisk_ng/infrastructure/tracing/__init__.py | iqtek/amocrn_asterisk_ng | 429a8d0823b951c855a49c1d44ab0e05263c54dc | ["MIT"] | null | null | null |
from .generate_trace_id import generate_trace_id
from .startup import tracing_startup
| 28.666667 | 48 | 0.883721 | 13 | 86 | 5.461538 | 0.538462 | 0.366197 | 0.422535 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.093023 | 86 | 2 | 49 | 43 | 0.910256 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
| f19d98659a0bb7f2342c7a918c2718724a093126 | 7,952 | py | Python | tmvenom/tmvenom2.py | Ajijul123aa/Reverse-Engineering | fad3f3eccadc9ca71620e07a8f3318c00334bcaa | ["Apache-2.0"] | 337 | 2020-08-15T12:22:14.000Z | 2022-03-29T06:05:15.000Z | tmvenom/tmvenom2.py | Wh014M/Reverse-Engineering | f7aae2c43f7ea4a6730964d085c07814b6660a53 | ["Apache-2.0"] | 3 | 2020-11-12T14:30:48.000Z | 2021-05-18T16:56:22.000Z | tmvenom/tmvenom2.py | Wh014M/Reverse-Engineering | f7aae2c43f7ea4a6730964d085c07814b6660a53 | ["Apache-2.0"] | 83 | 2020-08-15T00:22:58.000Z | 2022-03-31T08:40:23.000Z |
import marshal,zlib,base64
exec(marshal.loads(zlib.decompress(base64.b16decode("789CED9C5F6F1BC711C0F7F8471425CA962CDBB22C2759274E223B9628C9B21DA79263DA561C15B2249C14B896E01A27DE49A47C242FBCA325227691204191222F7561D768517F82A208DA87BEF4A1055AF453F4A1C85B8016C837686766EF1F8F47899215DB754989CBBDBD99BDBDD9BDDFCC1D979B65F62B02EF4BF036D312632AFC4B4C676CC9CD4B6C4972F211B614A17C04F38B7669942D45990A698CA931B614676A9C2DB531B58D2D25989A604BED4C6D674B49A626D95207D3185BEF646A07FB5C6292BDD1E9DF107BD414FB02DA91626A1765BA98BA8F32FB98BA9F32FB99DA4D996EA6F650A687A907287380A9BD94E965EA41CA1C64EA21CA1C62EA61CA1C666A1F65FA987A84324798DA4F997EA61EA5CC51A60E506680A9C728738C69C7D8E70CDA27B185C157C070F9FFC06B7610ACC72C4C4E9909480796477F7061B4E0E5C77CF933BEFCB82F7FD6973FE7CB9F2F5871C867754D299B67B1B3723CAD2A9622926CA9306C69E5426533BD9AD735339D2B15B4B455B8AB154B05D85BD6D2E54A712CEB7439B6F20AD69264D483D73E849E5C18C491306BA1C0604862E26E75CDC20FC33AEC9430C9AD36EA547BD6AD36824914931826714CDA304940028782710183A28FFA1B9BD0814D307F07E937BFFC32FCFFD1637F9E731EB2CB5FEE976FA61EC8874AF32D0E6BFE095BFCE8D7DF3CFA39FEDBC50FEDCD474F1A5785DB0FDC8CAF150FEA6A6950D7961AA126A86F80F95B68BFFFE0BEFCC35AD98775870B6B7568031FD61D7FDB536F7068F3F78DDBFB00FBC1B3C493B0F2C73C4CC2EBAFF0DA1E37D7EA860D30BFDEA2D5DC193E4F0265011378FBEB2CBD85DE83604958FBC29A60FECA6EB3BBAB365FB3E55D01A105BE31DC58274CE549A343D6907654B4D47E4DF0ABDA5D4D2F199ACA2F57B98D54BEA86573C57C56D1F9F5CABAA6AD9803A86557C1574B65904090F28AA9954D7E51601D3969EAA50DA39C2F5A0DB818830FBDB456EAC7B26E2263BCE6AF9E93AA6473D222974A147436A3B489B918BA953E60671FB0B70FF0D947DB09F88CC267BB5D1E13180DD14806343AB6D5E80C68A4B6D5E80A68ECDB56637F40A37B5B8D9E80C6817A8D5E61C48368C49870D3E8560E915BF1BBE2D7203F5352D47C718D672A560E3A7EBA08DD5F50AC7CA9388C2FF33CC80CEDEE4581C03DB30D07D7C4E445CE4DF26C4A4113A393F64F9A2770BF18887CAB1769DF2C55ACCA0A5620F436363686ABA26C182280B4E50CED825B21E95DCB5BB98AD824BD9C6519E67BE9F41A9593AA7B55D86D21BD8D9C62998A61387AF68571A5BAA295B76AA7A5E9DA5A592984EA2DC3E10A9562DEAADE0AEA4D174D4B2145A177C96DD5ED4626A22EBAB7BB2E92F1BA1DC4B04AC64BD7C2ABB2ACA91468AD9535AD485774B6AA14A9485953F2C52D2E7D05061136DB3C6A5FFA1DF41EF3FD614956B243EE9803816B906C5E62F708057D576F8DB3FB1283B1BC2EB1F508BB27391CC0ED280D6938DE7A1C81019038847B3FEE6437689CC7689CE3213AD6046BBE7B3FAF6273DB9C5334ABA6851BA6A5C2D8A153DB28E72D8D72AB7AC5CCD1E958F9822832754D332834A4316B529AADB301D99022D3763AFF1EA95BEA12C873CE98CEF6325644A70A0DBFCFE8B423EC4E84954FE1016CEC51B4583CC8E0F286730600900D7C17741B9DE8758FFE1DCBDA2DFBDA9EDACC5B0EEEE1B2B60596A78A10223B323C5B2A5AF96245E3EFF349D73988CB5233BB20C55A000E4482611A2BB2E40C18F2076565E376BE68542C317C50A2649B164CAC15C88C05AD58112E848657A5586FB84EF898C0ED14192B057F1D527BC4365ECCEF2FBE94C8783406241A2EC43F7893CFA0F22883038319FB847DC9927EE178BD709B1096EA8513F5C2ED4238E236244611BC271667EB6D780F073D4B8EE49E241CC8BD881DE7C7049CD165D8F7055875026FEFA8EA28BB17C55B3C1C14F76DF108DEEAA1780F6EB5B3F524DEE961412FDE4D20EFE1F60E92C36C9D0EFD39DD7EF4D196646F1D71EE43B0BA7E5F9BE1660FF2A2E10362AC898D6362A393A48A0966E76808BE4243304E41C7C58BDC7C833C0A4616D3F31448CC64664FF3F9CA8A9ECF3A453732B3E601905B1EBA788B4F1B3CA3AA65CD346108D208330F138300915A5185B8C528952DD8370E2FB3D7519BC7C2D94A01693AC9CDC1808A62E5F85BBC889E6692A74D35AB94D574A1B432AC1877CC57DD4A94AA0EDE4F882B45950BD704F5F5A1C7D08A5A59C1E10F0224281CE2188E6873956E2BF990818AE5521EAAD7E0D232CA98A6CB1075954DEDB69535F8CC87730B8B93E477F9CCFC9C0C79B297CCC15E78AE0B956C16CE7FB5A2EB556E1F5553CDD34C447299B2C6C1EBF10D6030B74A1CFC039CBB9E372D90E483D574F12447E74A976DD5BC10E2AFAFCC5DBF9E99BDBAC03F9893F9D48FE6
67E6A61743DD01B56C607964A46066C4E1B325A34AB63114B89EF96508263738BA2F283379BEC8C1148011B3A46B8E3F227DDB35F96FF60F8A1A21A6E4858A6EE5D339A843073B0D8B1DA6663996DECEA8E6DB9E8A9E03E2F04FEEC3E6E4E0C03250CE1980D49093D499761B1C151A53A0422E170AB54D432FE5AD50E335FDA2C13F3F3395599882313EBDC8AF2F7C70656E76616E668A2F2C66E4C5E9D96B34828EE028D7CA77F3590D46B76981A3353FD645C75A1D6278D9361D441ECAC83E195D8A8C7B651C3432D257C65393B13A426E55D3A17708B99795EC1D9199F9688A768AB04EC6C07CD06575DE2024A339E853A9D227B8FB7AFF8E5D6977CB3416CF12A913E0E412E0E6E281CF849484F771D84EC2BB1B782E7229FB3E601FFCC5A92C016917FC7544EA59FF99E4384A11F412E4E9510A341F9A6A47BDF7E95641DC3238A202F1C25BDAC21EE283C2897A610FF131D7D724FD62DB21DE918BDA90F7551F8EF9BBAE8A40BB8FF51D8D41DF5903FACE5AD0FBDADEEF6B83407DCD79D8C44FB9904F79903FB63BC80B9A87937E2BC8A76A20DF2CDD37F2C5616D53A38A49FBE9A80ED5A9A50D732754EFC1FCD02AC044E3430ADF7CF7DC1E135EC6DBF49704F0DBD8F77F1AF0325E30835D8DA01DAD21B78C2114A1FA83129881EE886E666629B3A2573499A3908B6DF93826AF6382AD914FB0B07B31EC15DBC22616CF10AB7B88C48D482D3E91D40997D61EA91336A9130D48FDD3DAA73802AB5162961486EBA0E896B80E0A3783EBE82E719D60EBEDF5C44E8612FB13C907D8170BD702BE7B1598EF26263FD5544CAE6487E10D57F85346E5A703FC2E999B692070DACC4150747B1B72A76C72534B5AD016D01EAC877643A3FE1FE1FA2D27DABE393533337783F02DA3AD76056AAC1C46DD6FBC803ABE2DA6532EA6933BC6F417CF1CD3FE7AEB202D6056C369C1DF5D85D5E190AE3E07488B93785920ADE78B95CD614D5F7D5A488F07204D151351761066773861B6BEDA42B540F5483DAAB735ED4B8F6C023576C735796A6A361070EF9ED8582319F7EF5B333B4ECC4ED585D63B67F6270D990DD0F33D94DE8EDA35C24D513BF8B03B1278D81D6932B48E3854B491EDB524FC4988617F032CEDD9036FA73A22B6407524249E0E7DC6FD82A0BAA9A7202B4AF6CEB0517D5A508F06406D54AD5CA9B8E347DC4325DE0234027AA81ED05B9B741B3ACF5DC9CCBCE8744ED5D2395A8BE8008E71BCCAAFED96C9188E0B7B7EC7DCA71DC100DA8372F07947A327D38D91FCB39707C9DED30EAF31E181F44F5CAD60208DF3551B713955C3E55420968E0462E9882F960E0374970BE8AE170ED0A79B01B4835923673C2DA4878390CE192D42EF96D0A74208DDD89E2F019EF73589670CA3E5537B046AFA923167F44B35A1F3D694AE7DDC91DA51E8DCE2748BD3759C3ED90CA7E911E7B0997B5A480F06209D2DA8E94A31BFE9D2644531735B3DE6282B1BADC71C82D127EA191D6ACE169D774767FACA11EC77E559E1F9ABE73295CF9D6DB71713F91C393FA0D93680FED4D58AEC19A0BD0AEBE7F4B11D01FAF9CFE6DB01A00DFD7B07B4A195F516A0F70CD068CE9705D0D15A4A3F1340A3FD0A1EA0C3E6DFED1DA03F6BCDBF7B5126743CC7F9774D7D4968E614E0B1621ACF71069E836468450BC9ADA9774D4DBD1384C6737EBA591C30E61E4BCF64AA5D0BCA2D28EF08CAEBBB87F25000CAEBCA5D250DF5DD6E6A5E5D2B44AEE3F1C97A1E37B6690BC5BB4231D8F22F2D14B750FC02A27843293F5F1443035A286EA1F859A11886DBB7DF1B8A6B7E537F3CC6EC0503C4CFEA255A76C9FBB2CC79BA2BBEFDF3B681D08018B1BE46CC5E5924EE22D7936BB7CB93B65C87BDDD19904BD9E55D81F27D76F9FE4079B75DDE13283F6097F706CA0FDAE58702E587EDF2BE40F911BBBC3F507ED42E1F08941FB3CB5F0994BF6A97BF1628E7FE6D00F571DF9A06AF93DFF8A354D32F8E6C42A07C2D82926F90E43FC325DB85E424499E20C98E48A8645248F692E49B24391E2ED92124BF9550F22D92FC71B8A43D27F0CF24F936497E192E693BA747243948927F0897B49FBEDF21C99324F98F70C97D42324392A748B22D1A2AB95F48F691E43B2439122ED92D24FF4D7D749A2497C2257B84E45F49728824BF8A3A3B85C2708DC201A1F00B524893C2BF5C855EA68ED068898BF006D5476BD441622C287126EC00A37480713A4024661D14A503547A964A0FC7D4735009860A07BC5001974A99A5352C6A16C2B1BD0EBC2626028C5DE6F3999B337399ABFCFAD4EC47FC5690C11070707E978F0D8F38685F566EF1C9C98BB48086B3AC4EBEB80A1EB4CB3DCC724EC8D0DA38E06B0CD7312C8F8A3DF46BD28CF8BDB5EB9F68AD3A90D1850CC62E1CC20ADFAC6F704C6E4D63BE9AE6697A9CFB1D8927744608ED27A19CE1497822E3BEA3DD100F8F422A3A2BA4BA29E8C009D22132E7840C3E3CE7D7956C88C4797F73F09B9
77A91777D2297F1DBF37A910BBEE3644CDF49999D8E95477C223F0C1719F59DD10D889B3CD388C95E3B72D5579CE176AEB0A0E95AD6BAB4EC2CB8B528D628745660B9F5C67BDF3CFCB4E6FFC9D7DC1EAAB4CAC328A563949EA1749CD2B3949EA3F43CA5EF527A817EEC3F3A223E84B64EE7E905943A95E6CC63E2EC272626207577D310C5E8FA62404D4C90F02B3861994FA58BF9D73400253A9622E38A54E621EF3AE435EBCDD08255BEB824EEC62AED6EE84241CC9B98E0834E398D09C6753246CBF23B98E05C66197F2121E31366F90C26F843145A7346844218D18B056A68011BA37E7D1A0C7D0E41DEFC1B051E5D14887881CB71FB7B9866DEA0134D455350470202993DFEA4E0C9CB25696D05EF13E5F0DD1EA1955B8471B1772C3469B6A497CA4A41917B6A8DECFE60424CB4AD940D5D23C389859372794B133D1475BB8926FFC65C2BBFE9F6CB3B6EE7781D36E27653A087FA196BB40027B66EA250522BBA7691EEE32E4192B2170E4A45DB24DF5F049797E88A26A3C9CE644F3291EC4A9E4A9E84B793E2E7C9E4E9E4DDFF028F73CE1A"))))
| 2,650.666667 | 7,924 | 0.997736 | 12 | 7,952 | 661.166667 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.616178 | 0.000377 | 7,952 | 2 | 7,925 | 3,976 | 0.381935 | 0 | 0 | 0 | 0 | 0 | 0.989185 | 0.989185 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 12 |
| f1cf8fefaf147f4d2eaa1005e8a1f67235d9aa67 | 9,151 | py | Python | method/plot.py | chonlei/3PNN | 72c960421a307b187368441256fa2068fe1d0c1a | ["BSD-3-Clause"] | 1 | 2021-11-11T02:37:04.000Z | 2021-11-11T02:37:04.000Z | method/plot.py | chonlei/3PNN | 72c960421a307b187368441256fa2068fe1d0c1a | ["BSD-3-Clause"] | null | null | null | method/plot.py | chonlei/3PNN | 72c960421a307b187368441256fa2068fe1d0c1a | ["BSD-3-Clause"] | 1 | 2021-11-11T02:37:05.000Z | 2021-11-11T02:37:05.000Z |
#
# Quick diagnostic plots.
#
from __future__ import absolute_import, division
from __future__ import print_function, unicode_literals
import numpy as np
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
def basic_plot(raw, fig=None, axes=None, palette='hls'):
    """
    # Plot the raw data, simple plot.
    #
    # Input
    # =====
    # `raw`: Raw EFI input signal, expect shape (`n_readout`, `n_stimuli`).
    # `fig`, `axes`: Matplotlib figure and axes handlers; if `None`, a `fig`
    #     and an `axes` handler will be created.
    # `palette`: Seaborn colour palette name to change plotting colour.
    #
    # Return
    # =====
    # Matplotlib figure and axes handlers.
    """
    n_readout, n_stimuli = raw.shape
    x = np.arange(n_readout) + 1
    # Just set some cool colour...
    c = sns.color_palette(palette, n_readout)
    if (fig is None) or (axes is None):
        fig, axes = plt.subplots(1, 1)
    for i in range(n_stimuli):
        axes.plot(x, raw[:, i], c=c[i])
    axes.set_xlim([1, 16])
    axes.set_ylim([0, 2])
    axes.set_xticks(range(1, 17))
    axes.set_xlabel('Electrode #')
    axes.set_ylabel(r'Transimpedance (k$\Omega$)')
    return fig, axes


def basic_plot_splitted(raw, fig=None, axes=None, c='C0', ls=''):
    """
    # Plot the raw EFI data, split into one panel per stimulus.
    #
    # Input
    # =====
    # `raw`: Raw EFI input signal, expect shape (`n_readout`, `n_stimuli`).
    # `fig`, `axes`: Matplotlib figure and axes handlers; if `None`, a `fig`
    #     and an `axes` handler will be created.
    # `c`: Plotting colour.
    # `ls`: Matplotlib linestyle argument.
    #
    # Return
    # =====
    # Matplotlib figure and axes handlers.
    """
    n_readout, n_stimuli = raw.shape
    x = np.arange(n_readout) + 1
    if (fig is None) or (axes is None):
        fig, axes = plt.subplots(4, 4, figsize=(14, 10))
    for i in range(n_stimuli):
        ai, aj = i // 4, i % 4
        axes[ai, aj].plot(x, raw[:, i], c=c, marker='o', ls=ls)
        axes[ai, aj].set_xlim([1, 16])
        axes[ai, aj].set_ylim([0, 2])
        axes[ai, aj].set_xticks(range(1, 17))
    axes[-1, 1].text(1.05, -0.3, 'Electrode #', ha='center', va='center',
                     transform=axes[-1, 1].transAxes)
    axes[1, 0].text(-0.25, -0.25, r'Transimpedance (k$\Omega$)', ha='center',
                    va='center', transform=axes[1, 0].transAxes, rotation=90)
    return fig, axes


def fitted_curves(p, func, fig=None, axes=None, palette='hls'):
    """
    # Get the curvature of the EFI measurement, with a given parametric form.
    #
    # Input
    # =====
    # `p`: Parameters for `func`; expect a dictionary with the stimulation
    #     electrode number as the key, and parameters as the value.
    # `func`: Function to fit to each curve, with `n_parameters`, giving the
    #     parameters as the gradients.
    # `fig`, `axes`: Matplotlib figure and axes handlers; if `None`, a `fig`
    #     and an `axes` handler will be created.
    # `palette`: Seaborn colour palette name to change plotting colour.
    #
    # Return
    # =====
    # Matplotlib figure and axes handlers.
    """
    n_stimuli = len(p)
    n_readout = n_stimuli  # assume it is the case
    x = np.arange(n_readout) + 1
    # Just set some cool colour...
    c = sns.color_palette(palette, n_readout)
    if (fig is None) or (axes is None):
        fig, axes = plt.subplots(1, 1)
    for i in range(n_stimuli):
        # Right
        if p[i][0] is not None:
            # Calculate
            x1 = np.arange(1, n_readout - i - 1)
            y1 = func(x1, *p[i][0])
            # For plot
            x_plot = x1 + i + 1
            y_plot = y1
            # And plot
            axes.plot(x_plot, y_plot, c=c[i])
        # Left
        if p[i][1] is not None:
            # Calculate
            x2 = np.arange(1, i + 1)
            y2 = func(x2, *p[i][1])
            # For plot
            x_plot = x2
            y_plot = y2[::-1]
            # And plot
            axes.plot(x_plot, y_plot, c=c[i])
    axes.set_xlim([1, 16])
    axes.set_ylim([0, 2])
    axes.set_xticks(range(1, 17))
    axes.set_xlabel('Electrode #')
    axes.set_ylabel(r'Transimpedance (k$\Omega$)')
    return fig, axes


def fitted_curves_splitted(p, func, fig=None, axes=None, c='C2', ls='-'):
    """
    # Get the curvature of the EFI measurement, with a given parametric form.
    #
    # Input
    # =====
    # `p`: Parameters for `func`; expect a dictionary with the stimulation
    #     electrode number as the key, and parameters as the value.
    # `func`: Function to fit to each curve, with `n_parameters`, giving the
    #     parameters as the gradients.
    # `fig`, `axes`: Matplotlib figure and axes handlers; if `None`, a `fig`
    #     and an `axes` handler will be created.
    # `c`: Plotting colour.
    # `ls`: Matplotlib linestyle argument.
    #
    # Return
    # =====
    # Matplotlib figure and axes handlers.
    """
    n_stimuli = len(p)
    n_readout = n_stimuli  # assume it is the case
    x = np.arange(n_readout) + 1
    if (fig is None) or (axes is None):
        fig, axes = plt.subplots(4, 4, figsize=(14, 10))
    for i in range(n_stimuli):
        ai, aj = i // 4, i % 4
        # Right
        if p[i][0] is not None:
            # Calculate
            x1 = np.arange(1, n_readout - i - 1)
            y1 = func(x1, *p[i][0])
            # For plot
            x_plot = x1 + i + 1
            y_plot = y1
            # And plot
            axes[ai, aj].plot(x_plot, y_plot, c=c, ls=ls)
        # Left
        if p[i][1] is not None:
            # Calculate
            x2 = np.arange(1, i + 1)
            y2 = func(x2, *p[i][1])
            # For plot
            x_plot = x2
            y_plot = y2[::-1]
            # And plot
            axes[ai, aj].plot(x_plot, y_plot, c=c, ls=ls)
        axes[ai, aj].set_xlim([1, 16])
        axes[ai, aj].set_ylim([0, 2])
        axes[ai, aj].set_xticks(range(1, 17))
    axes[-1, 1].text(1.05, -0.3, 'Electrode #', ha='center', va='center',
                     transform=axes[-1, 1].transAxes)
    axes[1, 0].text(-0.25, -0.25, r'Transimpedance (k$\Omega$)', ha='center',
                    va='center', transform=axes[1, 0].transAxes, rotation=90)
    return fig, axes


def parameters(rt, rl, fig=None, axes=None, c='C0', marker='o', ls='',
               label=''):
    """
    # Plot the parameters.
    #
    # Input
    # =====
    # `rt`: Transversal resistance parameters; the last one is the basal
    #     resistance.
    # `rl`: Longitudinal resistance parameters.
    # `fig`, `axes`: Matplotlib figure and axes handlers; if `None`, a `fig`
    #     and an `axes` handler will be created.
    # `c`: Plotting colour.
    # `marker`: Matplotlib marker argument.
    # `ls`: Matplotlib linestyle argument.
    # `label`: Matplotlib label argument.
    #
    # Return
    # =====
    # Matplotlib figure and axes handlers.
    """
    n_readout = len(rt)
    assert len(rt) == len(rl) + 1  # the last entry in R_T is the basal resistance
    x = np.arange(n_readout) + 1
    if (fig is None) or (axes is None):
        fig, axes = plt.subplots(2, 1, figsize=(8, 5), sharex=True)
    axes[0].plot(x, rt, marker=marker, c=c, ls=ls, label=label)
    axes[0].set_yscale('log')
    axes[0].set_ylabel(r'$R_T$ (k$\Omega$)')
    axes[1].plot(x[:-1], rl, marker=marker, c=c, ls=ls, label=label)
    axes[1].set_ylabel(r'$R_L$ (k$\Omega$)')
    axes[1].set_xlabel('Resistor index')
    axes[1].set_xlim([1, 16])
    axes[1].set_xticks(range(1, 17))
    return fig, axes


def sensitivity_analyse_splitted(x, y, fig=None, axes=None, c='C0', marker='o',
                                 ls='', label='', xylabels=None):
    """
    # Plot the feature sensitivity plot.
    #
    # Input
    # =====
    # `x`: An input/printing parameter (x-axis), with shape (`n_points`, ).
    # `y`: A feature (y-axis), with shape (`n_points`, `n_stimuli`).
    # `fig`, `axes`: Matplotlib figure and axes handlers; if `None`, a `fig`
    #     and an `axes` handler will be created.
    # `c`: Plotting colour.
    # `marker`: Matplotlib marker argument.
    # `ls`: Matplotlib linestyle argument.
    # `label`: Matplotlib label argument.
    # `xylabels`: [`x_label`, `y_label`] for the plot.
    #
    # Return
    # =====
    # Matplotlib figure and axes handlers.
    """
    n_points, n_stimuli = y.shape
    assert len(x) == n_points
    if (fig is None) or (axes is None):
        fig, axes = plt.subplots(4, 4, figsize=(14, 10))
    for i in range(n_stimuli):
        ai, aj = i // 4, i % 4
        if any(np.isfinite(y[:, i])):
            axes[ai, aj].plot(x, y[:, i], c=c, ls=ls, marker=marker,
                              label=label)
    if xylabels is not None:
        axes[-1, 1].text(1.05, -0.3, xylabels[0], ha='center', va='center',
                         transform=axes[-1, 1].transAxes)
        axes[1, 0].text(-0.25, -0.25, xylabels[1], ha='center', va='center',
                        transform=axes[1, 0].transAxes, rotation=90)
    return fig, axes
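
# Example usage with synthetic EFI-shaped data (assumed shapes only; not
# part of the original module):
if __name__ == '__main__':
    raw = np.random.uniform(0.1, 1.5, size=(16, 16))  # (n_readout, n_stimuli)
    fig, axes = basic_plot(raw)
    fig.savefig('basic_plot_demo.png')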
| 30.915541 | 79 | 0.555349 | 1,310 | 9,151 | 3.801527 | 0.135878 | 0.025301 | 0.045783 | 0.055422 | 0.828715 | 0.808835 | 0.781325 | 0.773695 | 0.770683 | 0.748795 | 0 | 0.02992 | 0.291444 | 9,151 | 295 | 80 | 31.020339 | 0.738125 | 0.368594 | 0 | 0.707317 | 0 | 0 | 0.054242 | 0 | 0 | 0 | 0 | 0 | 0.01626 | 1 | 0.04878 | false | 0 | 0.04065 | 0 | 0.138211 | 0.00813 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
| f1e606a3180faea210de3394e084276830429408 | 105 | py | Python | config/weights_dictionary.py | we684123/Telegram_search_text_alternative_plan | add97761a7dd044d17845789bfb315d624d2a38b | ["MIT"] | 1 | 2019-09-25T15:08:31.000Z | 2019-09-25T15:08:31.000Z | config/weights_dictionary.py | we684123/Telegram_search_text_alternative_plan | add97761a7dd044d17845789bfb315d624d2a38b | ["MIT"] | null | null | null | config/weights_dictionary.py | we684123/Telegram_search_text_alternative_plan | add97761a7dd044d17845789bfb315d624d2a38b | ["MIT"] | null | null | null |
def recommend_dictionary():
    return {}


def coerce_dictionary():
    return {
        "另一種": 20,  # "another kind" (Chinese); keyword weight, left as-is
    }
| 15 | 27 | 0.571429 | 10 | 105 | 5.8 | 0.7 | 0.551724 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027027 | 0.295238 | 105 | 6 | 28 | 17.5 | 0.756757 | 0 | 0 | 0 | 0 | 0 | 0.028571 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0 | 0.333333 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 7 |
| 7b0db11ad4f0a90d2b19bb6cdd1e4da00cc6a68b | 81,171 | py | Python | c3dm/dataset/dataset_configs.py | facebookresearch/c3dm | cac38418e41f75f1395422200b8d7bdf6725aa43 | ["MIT"] | 15 | 2020-12-04T16:40:21.000Z | 2021-11-06T01:35:16.000Z | c3dm/dataset/dataset_configs.py | facebookresearch/c3dm | cac38418e41f75f1395422200b8d7bdf6725aa43 | ["MIT"] | 2 | 2021-03-16T09:05:22.000Z | 2021-12-23T12:43:37.000Z | c3dm/dataset/dataset_configs.py | facebookresearch/c3dm | cac38418e41f75f1395422200b8d7bdf6725aa43 | ["MIT"] | 2 | 2021-04-08T00:50:29.000Z | 2021-11-06T01:35:06.000Z |
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
# list of root folders containing the dataset images
IMAGE_ROOTS = {
'freicars_clickp_filtd': ('./dataset_root/freicars/',),
'freicars_clickp_filtd_dbg': ('./dataset_root/freicars/',),
'cub_birds_hrnet_v2': ('./dataset_root/cub_birds/',),
'celeba_ff': ('./dataset_root/celeba/',
'./dataset_root/florence/'),
'pascal3d_clickp_all': ('./dataset_root/PASCAL3D+_release1.1',),
}
MASK_ROOTS = copy.deepcopy(IMAGE_ROOTS)
DEPTH_ROOTS = copy.deepcopy(IMAGE_ROOTS)
MASK_ROOTS['cub_birds_hrnet_v2'] = ('./dataset_root/cub_birds/',)
DATASET_ROOT = './dataset_root'
DATASET_URL = {
'freicars_clickp_filtd_train': 'https://dl.fbaipublicfiles.com/c3dm/freicars_clickp_filtd_train.json.gz',
'freicars_clickp_filtd_val': 'https://dl.fbaipublicfiles.com/c3dm/freicars_clickp_filtd_val.json.gz',
}
IMAGE_URLS = {
'cub_birds_hrnet_v2': ('http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz',),
'pascal3d_clickp_all': ('ftp://cs.stanford.edu/cs/cvgl/PASCAL3D+_release1.1.zip',),
}
MASK_URLS = {
'cub_birds_hrnet_v2': ('',),
}
DEPTH_URLS = {
'cub_birds_hrnet_v2': ('',),
}
C3DM_URLS = {
'freicars_clickp_filtd': 'https://dl.fbaipublicfiles.com/c3dm/c3dm_freicars.tar.gz',
}
C3DPO_MODELS = {
'cub_birds_hrnet_orth_b50': './dataset_root/c3dpo_cub',
'celeba_orth_b50': '',
'p3d_all_orth_b10': '',
'freicars_clickp_persp_b10_ray': './dataset_root/c3dpo_freicars',
}
C3DPO_URLS = {
'cub_birds_hrnet_orth_b50': '',
'celeba_orth_b50': '',
'p3d_all_orth_b10': '',
'freicars_clickp_persp_b10_ray': 'https://dl.fbaipublicfiles.com/c3dm/c3dpo_freicars.tar.gz',
}
# ----- connectivity patterns for visualizing the stick-men
STICKS = {
'pose_track': [ [2, 0],[0, 1],[1, 5],[5, 7],
[9, 7],[1, 6],[6, 8],[10, 8],
[1, 12],[12, 11],[11, 1],[14, 12],
[11, 13],[15, 13],[16, 14]] ,
'h36m': [ [10, 9], [9, 8], [8, 14],
[14, 15], [15, 16], [8, 11],
[11, 12], [12, 13], [8, 7],
[7, 0], [1, 0], [1, 2],
[2, 3], [0, 4], [4, 5], [5, 6] ],
'cub_birds': [ [1, 5], [5, 4], [4, 9],
[9, 0], [0, 13], [0, 12],
[0, 8], [12, 13], [1, 14],
[14, 3], [3, 2], [2, 7],
[1, 10], [1, 6], [2, 11],
[2, 7], [8, 13] ],
'coco': [ [13,15], [14,16], [12,14], [11,12,], [11,13],
[0,12], [0,11], [8,10], [6,8],
[7,9], [5,7], [0,5], [0,6],
[0,3], [0,4], [0,2], [0,1] ],
'freicars': [[0, 8], [0, 4], [4, 10], [8, 10],
[10, 9], [9, 11], [8, 11],
[11, 6], [9, 2], [2, 6],
[4, 1], [5, 1], [0, 5], [5, 7], [1, 3],
[7, 3], [3, 2], [7, 6]],
'pascal3d': {
'car': [[0, 8], [0, 4], [4, 10], [8, 10],
[10, 9], [9, 11], [8, 11],
[11, 6], [9, 2], [2, 6],
[4, 1], [5, 1], [0, 5], [5, 7], [1, 3],
[7, 3], [3, 2], [7, 6]],
'aeroplane': [[2, 5], [1, 4], [5, 3], [3, 7],
[7, 0], [0, 5], [5, 7], [5, 6],
[6, 0], [6, 3], [2, 4], [2, 1]],
'motorbike': [[6, 2],
[2, 9],
[2, 3],
[3, 8],
[5, 8],
[3, 5],
[2, 1],
[1, 0],
[0, 7],
[0, 4],
[4, 7],
[1, 4],
[1, 7],
[1, 5],
[1, 8]],
'sofa': [[1, 5],
[5, 4],
[4, 6],
[6, 2],
[2, 0],
[1, 0],
[0, 4],
[1, 3],
[7, 5],
[2, 3],
[3, 7],
[9, 7],
[7, 6],
[6, 8],
[8, 9]],
'chair': [[7, 3],
[6, 2],
[9, 5],
[8, 4],
[7, 9],
[8, 6],
[6, 7],
[9, 8],
[9, 1],
[8, 0],
[1, 0]],
},
}
STICKS['cub_birds_hrnet'] = STICKS['cub_birds']
H36M_ACTIONS = [ 'Directions','Discussion','Eating','Greeting',
'Phoning','Photo','Posing','Purchases','Sitting',
'SittingDown','Smoking','Waiting','WalkDog',
'Walking','WalkTogether' ]
P3D_NUM_KEYPOINTS = {
'aeroplane': 8,
'car': 12,
'tvmonitor': 8,
'sofa': 10,
'motorbike': 10,
'diningtable': 12,
'chair': 10,
'bus': 12,
'bottle': 7,
'boat': 7,
'bicycle': 11,
'train': 17 }
P3D_CLASSES = list(P3D_NUM_KEYPOINTS.keys())
# add the per-class p3d db paths
for cls_ in P3D_CLASSES:
    IMAGE_ROOTS['pascal3d_clickp_'+cls_] = IMAGE_ROOTS['pascal3d_clickp_all']
    IMAGE_ROOTS['pascal3d_clickp_mesh_'+cls_] = IMAGE_ROOTS['pascal3d_clickp_all']
    IMAGE_ROOTS['pascal3d_clickp_clean_'+cls_] = IMAGE_ROOTS['pascal3d_clickp_all']
P3D_NUM_IMAGES={
'train':{"aeroplane": 1953, "car": 5627,
"tvmonitor": 1374,"sofa": 669,
"motorbike": 725,"diningtable": 751,
"chair": 1186,"bus": 1185,
"bottle": 1601,"boat": 2046,
"bicycle": 904,"train": 1113,},
'val': {"aeroplane": 269,"car": 294,
"tvmonitor": 206,"sofa": 37,
"motorbike": 116,"diningtable": 12,
"chair": 227,"bus": 153,
"bottle": 249,"boat": 163,
"bicycle": 115,"train": 109}}
DATASET_CFG = {
'freicars_clickp_filtd':
{
'image_height': 9*40,
'image_width': 16*40,
'max_angle_diff': 3.14/2,
'box_crop': False,
},
'celeba':
{
'image_height': 3*130,
'image_width': 3*130,
'max_angle_diff': 3.14/2,
'box_crop': False,
'subsample': 4,
},
'ldos_chairs':
{
'image_height': 3*110,
'image_width': 4*110,
'max_angle_diff': 3.14/2,
'min_visible': 6,
'kp_conf_thr': 0.8,
'box_crop': False,
},
'ldos_chairs_armchair':
{
'image_height': 3*110,
'image_width': 4*110,
'max_angle_diff': 3.14/2,
'min_visible': 4,
'kp_conf_thr': 0.6,
'box_crop': False,
},
'pascal3d_clickp':
{
'image_height': 3*6*20,
'image_width': 4*6*20,
'max_angle_diff': 3.14/2,
'min_visible': 6,
'box_crop': True,
},
'pascal3d_clickp_clean':
{
'image_height': 3*6*20,
'image_width': 4*6*20,
'max_angle_diff': 3.14/2,
# 'min_visible': 4,
'box_crop': True,
'dilate_masks': 0,
'box_crop_context': 0.2,
},
'h36m_sparse':
{
'image_height': 25*20,
'image_width': 15*20,
'max_angle_diff': 3.14/2,
# 'max_frame_diff': 0.33,
# 'min_visible': 6,
'subsample': 10,
'box_crop': True,
'box_crop_context': 0.2,
'dilate_masks': 0,
},
'cub_birds_hrnet_v2':
{
'image_height': 3*130,
'image_width': 3*130,
'max_angle_diff': 3.14/2,
'box_crop': False,
},
'default':
{
'image_height': 3*110,
'image_width': 4*110,
'max_angle_diff': 3.14/2,
'box_crop': False,
}
}
for cls_ in P3D_CLASSES:
    DATASET_CFG['pascal3d_clickp_'+cls_] = DATASET_CFG['pascal3d_clickp']
    DATASET_CFG['pascal3d_clickp_clean_'+cls_] = DATASET_CFG['pascal3d_clickp_clean']
FILTER_DB_SETTINGS = {
'freicars_clickp_filtd': {
'nn': 1e-3,
'perc_keep': 0.95,
'sig': 0.02,
'lap_size': 5e-4,
'lap_alpha': 0.9,
},
'default': {
'nn': 1e-3,
'perc_keep': 0.9,
'sig': 0.01,
'lap_size': 1e-3,
'lap_alpha': 0.9,
}
}
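
# Assumed lookup pattern for the per-dataset dicts above (a sketch; the
# real accessor lives elsewhere in c3dm): fall back to the 'default' entry
# when a dataset has no dedicated config.
def get_cfg(name, cfgs=DATASET_CFG):
    return cfgs.get(name, cfgs['default'])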
FREIBURG_VAL_IMAGES = [
"022/undistort/images/frame_0000001.jpg.half.jpg",
"022/undistort/images/frame_0000002.jpg.half.jpg",
"022/undistort/images/frame_0000003.jpg.half.jpg",
"022/undistort/images/frame_0000004.jpg.half.jpg",
"022/undistort/images/frame_0000005.jpg.half.jpg",
"022/undistort/images/frame_0000006.jpg.half.jpg",
"022/undistort/images/frame_0000007.jpg.half.jpg",
"022/undistort/images/frame_0000008.jpg.half.jpg",
"022/undistort/images/frame_0000009.jpg.half.jpg",
"022/undistort/images/frame_0000010.jpg.half.jpg",
"022/undistort/images/frame_0000011.jpg.half.jpg",
"022/undistort/images/frame_0000012.jpg.half.jpg",
"022/undistort/images/frame_0000013.jpg.half.jpg",
"022/undistort/images/frame_0000014.jpg.half.jpg",
"022/undistort/images/frame_0000015.jpg.half.jpg",
"022/undistort/images/frame_0000016.jpg.half.jpg",
"022/undistort/images/frame_0000017.jpg.half.jpg",
"022/undistort/images/frame_0000018.jpg.half.jpg",
"022/undistort/images/frame_0000019.jpg.half.jpg",
"022/undistort/images/frame_0000020.jpg.half.jpg",
"022/undistort/images/frame_0000021.jpg.half.jpg",
"022/undistort/images/frame_0000022.jpg.half.jpg",
"022/undistort/images/frame_0000023.jpg.half.jpg",
"022/undistort/images/frame_0000024.jpg.half.jpg",
"022/undistort/images/frame_0000025.jpg.half.jpg",
"022/undistort/images/frame_0000026.jpg.half.jpg",
"022/undistort/images/frame_0000027.jpg.half.jpg",
"022/undistort/images/frame_0000030.jpg.half.jpg",
"022/undistort/images/frame_0000031.jpg.half.jpg",
"022/undistort/images/frame_0000032.jpg.half.jpg",
"022/undistort/images/frame_0000033.jpg.half.jpg",
"022/undistort/images/frame_0000034.jpg.half.jpg",
"022/undistort/images/frame_0000035.jpg.half.jpg",
"022/undistort/images/frame_0000036.jpg.half.jpg",
"022/undistort/images/frame_0000037.jpg.half.jpg",
"022/undistort/images/frame_0000038.jpg.half.jpg",
"022/undistort/images/frame_0000039.jpg.half.jpg",
"022/undistort/images/frame_0000040.jpg.half.jpg",
"022/undistort/images/frame_0000041.jpg.half.jpg",
"022/undistort/images/frame_0000042.jpg.half.jpg",
"022/undistort/images/frame_0000043.jpg.half.jpg",
"022/undistort/images/frame_0000044.jpg.half.jpg",
"022/undistort/images/frame_0000045.jpg.half.jpg",
"022/undistort/images/frame_0000046.jpg.half.jpg",
"022/undistort/images/frame_0000047.jpg.half.jpg",
"022/undistort/images/frame_0000048.jpg.half.jpg",
"022/undistort/images/frame_0000049.jpg.half.jpg",
"022/undistort/images/frame_0000050.jpg.half.jpg",
"022/undistort/images/frame_0000051.jpg.half.jpg",
"022/undistort/images/frame_0000052.jpg.half.jpg",
"022/undistort/images/frame_0000053.jpg.half.jpg",
"022/undistort/images/frame_0000054.jpg.half.jpg",
"022/undistort/images/frame_0000055.jpg.half.jpg",
"022/undistort/images/frame_0000056.jpg.half.jpg",
"022/undistort/images/frame_0000057.jpg.half.jpg",
"022/undistort/images/frame_0000058.jpg.half.jpg",
"022/undistort/images/frame_0000059.jpg.half.jpg",
"022/undistort/images/frame_0000060.jpg.half.jpg",
"022/undistort/images/frame_0000061.jpg.half.jpg",
"022/undistort/images/frame_0000062.jpg.half.jpg",
"022/undistort/images/frame_0000063.jpg.half.jpg",
"022/undistort/images/frame_0000064.jpg.half.jpg",
"022/undistort/images/frame_0000065.jpg.half.jpg",
"022/undistort/images/frame_0000066.jpg.half.jpg",
"022/undistort/images/frame_0000067.jpg.half.jpg",
"022/undistort/images/frame_0000068.jpg.half.jpg",
"022/undistort/images/frame_0000069.jpg.half.jpg",
"022/undistort/images/frame_0000070.jpg.half.jpg",
"022/undistort/images/frame_0000071.jpg.half.jpg",
"022/undistort/images/frame_0000072.jpg.half.jpg",
"022/undistort/images/frame_0000073.jpg.half.jpg",
"022/undistort/images/frame_0000074.jpg.half.jpg",
"022/undistort/images/frame_0000075.jpg.half.jpg",
"022/undistort/images/frame_0000076.jpg.half.jpg",
"022/undistort/images/frame_0000077.jpg.half.jpg",
"022/undistort/images/frame_0000078.jpg.half.jpg",
"022/undistort/images/frame_0000079.jpg.half.jpg",
"022/undistort/images/frame_0000080.jpg.half.jpg",
"022/undistort/images/frame_0000081.jpg.half.jpg",
"022/undistort/images/frame_0000082.jpg.half.jpg",
"022/undistort/images/frame_0000083.jpg.half.jpg",
"022/undistort/images/frame_0000084.jpg.half.jpg",
"022/undistort/images/frame_0000085.jpg.half.jpg",
"022/undistort/images/frame_0000086.jpg.half.jpg",
"022/undistort/images/frame_0000087.jpg.half.jpg",
"022/undistort/images/frame_0000088.jpg.half.jpg",
"022/undistort/images/frame_0000089.jpg.half.jpg",
"022/undistort/images/frame_0000090.jpg.half.jpg",
"022/undistort/images/frame_0000091.jpg.half.jpg",
"022/undistort/images/frame_0000092.jpg.half.jpg",
"022/undistort/images/frame_0000093.jpg.half.jpg",
"022/undistort/images/frame_0000094.jpg.half.jpg",
"022/undistort/images/frame_0000095.jpg.half.jpg",
"022/undistort/images/frame_0000096.jpg.half.jpg",
"022/undistort/images/frame_0000097.jpg.half.jpg",
"022/undistort/images/frame_0000098.jpg.half.jpg",
"022/undistort/images/frame_0000099.jpg.half.jpg",
"022/undistort/images/frame_0000101.jpg.half.jpg",
"022/undistort/images/frame_0000104.jpg.half.jpg",
"022/undistort/images/frame_0000105.jpg.half.jpg",
"022/undistort/images/frame_0000106.jpg.half.jpg",
"022/undistort/images/frame_0000107.jpg.half.jpg",
"022/undistort/images/frame_0000108.jpg.half.jpg",
"022/undistort/images/frame_0000109.jpg.half.jpg",
"022/undistort/images/frame_0000110.jpg.half.jpg",
"022/undistort/images/frame_0000111.jpg.half.jpg",
"022/undistort/images/frame_0000112.jpg.half.jpg",
"022/undistort/images/frame_0000113.jpg.half.jpg",
"022/undistort/images/frame_0000114.jpg.half.jpg",
"022/undistort/images/frame_0000115.jpg.half.jpg",
"022/undistort/images/frame_0000116.jpg.half.jpg",
"022/undistort/images/frame_0000117.jpg.half.jpg",
"022/undistort/images/frame_0000118.jpg.half.jpg",
"022/undistort/images/frame_0000119.jpg.half.jpg",
"022/undistort/images/frame_0000120.jpg.half.jpg",
"022/undistort/images/frame_0000121.jpg.half.jpg",
"022/undistort/images/frame_0000122.jpg.half.jpg",
"022/undistort/images/frame_0000123.jpg.half.jpg",
"022/undistort/images/frame_0000124.jpg.half.jpg",
"022/undistort/images/frame_0000125.jpg.half.jpg",
"022/undistort/images/frame_0000126.jpg.half.jpg",
"022/undistort/images/frame_0000127.jpg.half.jpg",
"022/undistort/images/frame_0000128.jpg.half.jpg",
"022/undistort/images/frame_0000129.jpg.half.jpg",
"022/undistort/images/frame_0000130.jpg.half.jpg",
"022/undistort/images/frame_0000131.jpg.half.jpg",
"022/undistort/images/frame_0000132.jpg.half.jpg",
"022/undistort/images/frame_0000133.jpg.half.jpg",
"022/undistort/images/frame_0000134.jpg.half.jpg",
"022/undistort/images/frame_0000135.jpg.half.jpg",
"022/undistort/images/frame_0000136.jpg.half.jpg",
"022/undistort/images/frame_0000137.jpg.half.jpg",
"022/undistort/images/frame_0000138.jpg.half.jpg",
"022/undistort/images/frame_0000139.jpg.half.jpg",
"022/undistort/images/frame_0000140.jpg.half.jpg",
"022/undistort/images/frame_0000141.jpg.half.jpg",
"022/undistort/images/frame_0000142.jpg.half.jpg",
"022/undistort/images/frame_0000143.jpg.half.jpg",
"022/undistort/images/frame_0000144.jpg.half.jpg",
"022/undistort/images/frame_0000145.jpg.half.jpg",
"022/undistort/images/frame_0000146.jpg.half.jpg",
"022/undistort/images/frame_0000147.jpg.half.jpg",
"022/undistort/images/frame_0000148.jpg.half.jpg",
"022/undistort/images/frame_0000149.jpg.half.jpg",
"022/undistort/images/frame_0000150.jpg.half.jpg",
"022/undistort/images/frame_0000151.jpg.half.jpg",
"022/undistort/images/frame_0000152.jpg.half.jpg",
"022/undistort/images/frame_0000153.jpg.half.jpg",
"022/undistort/images/frame_0000154.jpg.half.jpg",
"022/undistort/images/frame_0000155.jpg.half.jpg",
"022/undistort/images/frame_0000156.jpg.half.jpg",
"022/undistort/images/frame_0000157.jpg.half.jpg",
"022/undistort/images/frame_0000158.jpg.half.jpg",
"022/undistort/images/frame_0000159.jpg.half.jpg",
"022/undistort/images/frame_0000160.jpg.half.jpg",
"022/undistort/images/frame_0000161.jpg.half.jpg",
"022/undistort/images/frame_0000162.jpg.half.jpg",
"022/undistort/images/frame_0000163.jpg.half.jpg",
"022/undistort/images/frame_0000164.jpg.half.jpg",
"022/undistort/images/frame_0000165.jpg.half.jpg",
"022/undistort/images/frame_0000166.jpg.half.jpg",
"022/undistort/images/frame_0000167.jpg.half.jpg",
"022/undistort/images/frame_0000168.jpg.half.jpg",
"022/undistort/images/frame_0000169.jpg.half.jpg",
"022/undistort/images/frame_0000170.jpg.half.jpg",
"022/undistort/images/frame_0000171.jpg.half.jpg",
"022/undistort/images/frame_0000172.jpg.half.jpg",
"022/undistort/images/frame_0000173.jpg.half.jpg",
"022/undistort/images/frame_0000174.jpg.half.jpg",
"022/undistort/images/frame_0000176.jpg.half.jpg",
"022/undistort/images/frame_0000177.jpg.half.jpg",
"022/undistort/images/frame_0000178.jpg.half.jpg",
"022/undistort/images/frame_0000179.jpg.half.jpg",
"022/undistort/images/frame_0000180.jpg.half.jpg",
"022/undistort/images/frame_0000181.jpg.half.jpg",
"022/undistort/images/frame_0000182.jpg.half.jpg",
"022/undistort/images/frame_0000183.jpg.half.jpg",
"022/undistort/images/frame_0000184.jpg.half.jpg",
"022/undistort/images/frame_0000185.jpg.half.jpg",
"022/undistort/images/frame_0000186.jpg.half.jpg",
"022/undistort/images/frame_0000187.jpg.half.jpg",
"022/undistort/images/frame_0000188.jpg.half.jpg",
"022/undistort/images/frame_0000189.jpg.half.jpg",
"022/undistort/images/frame_0000190.jpg.half.jpg",
"022/undistort/images/frame_0000191.jpg.half.jpg",
"022/undistort/images/frame_0000192.jpg.half.jpg",
"022/undistort/images/frame_0000193.jpg.half.jpg",
"022/undistort/images/frame_0000194.jpg.half.jpg",
"022/undistort/images/frame_0000195.jpg.half.jpg",
"022/undistort/images/frame_0000196.jpg.half.jpg",
"022/undistort/images/frame_0000197.jpg.half.jpg",
"022/undistort/images/frame_0000198.jpg.half.jpg",
"022/undistort/images/frame_0000199.jpg.half.jpg",
"022/undistort/images/frame_0000200.jpg.half.jpg",
"022/undistort/images/frame_0000201.jpg.half.jpg",
"022/undistort/images/frame_0000202.jpg.half.jpg",
"022/undistort/images/frame_0000203.jpg.half.jpg",
"022/undistort/images/frame_0000204.jpg.half.jpg",
"022/undistort/images/frame_0000205.jpg.half.jpg",
"022/undistort/images/frame_0000206.jpg.half.jpg",
"022/undistort/images/frame_0000207.jpg.half.jpg",
"022/undistort/images/frame_0000208.jpg.half.jpg",
"022/undistort/images/frame_0000209.jpg.half.jpg",
"022/undistort/images/frame_0000210.jpg.half.jpg",
"022/undistort/images/frame_0000211.jpg.half.jpg",
"022/undistort/images/frame_0000212.jpg.half.jpg",
"022/undistort/images/frame_0000213.jpg.half.jpg",
"022/undistort/images/frame_0000214.jpg.half.jpg",
"022/undistort/images/frame_0000215.jpg.half.jpg",
"022/undistort/images/frame_0000216.jpg.half.jpg",
"022/undistort/images/frame_0000217.jpg.half.jpg",
"022/undistort/images/frame_0000218.jpg.half.jpg",
"022/undistort/images/frame_0000219.jpg.half.jpg",
"022/undistort/images/frame_0000220.jpg.half.jpg",
"022/undistort/images/frame_0000221.jpg.half.jpg",
"022/undistort/images/frame_0000222.jpg.half.jpg",
"022/undistort/images/frame_0000223.jpg.half.jpg",
"022/undistort/images/frame_0000224.jpg.half.jpg",
"022/undistort/images/frame_0000225.jpg.half.jpg",
"022/undistort/images/frame_0000226.jpg.half.jpg",
"022/undistort/images/frame_0000227.jpg.half.jpg",
"022/undistort/images/frame_0000228.jpg.half.jpg",
"022/undistort/images/frame_0000229.jpg.half.jpg",
"022/undistort/images/frame_0000230.jpg.half.jpg",
"022/undistort/images/frame_0000231.jpg.half.jpg",
"022/undistort/images/frame_0000232.jpg.half.jpg",
"022/undistort/images/frame_0000233.jpg.half.jpg",
"022/undistort/images/frame_0000234.jpg.half.jpg",
"022/undistort/images/frame_0000235.jpg.half.jpg",
"022/undistort/images/frame_0000236.jpg.half.jpg",
"022/undistort/images/frame_0000237.jpg.half.jpg",
"022/undistort/images/frame_0000238.jpg.half.jpg",
"022/undistort/images/frame_0000239.jpg.half.jpg",
"022/undistort/images/frame_0000240.jpg.half.jpg",
"022/undistort/images/frame_0000241.jpg.half.jpg",
"022/undistort/images/frame_0000242.jpg.half.jpg",
"022/undistort/images/frame_0000243.jpg.half.jpg",
"022/undistort/images/frame_0000244.jpg.half.jpg",
"022/undistort/images/frame_0000245.jpg.half.jpg",
"022/undistort/images/frame_0000246.jpg.half.jpg",
"022/undistort/images/frame_0000247.jpg.half.jpg",
"022/undistort/images/frame_0000248.jpg.half.jpg",
"022/undistort/images/frame_0000249.jpg.half.jpg",
"022/undistort/images/frame_0000250.jpg.half.jpg",
"022/undistort/images/frame_0000251.jpg.half.jpg",
"022/undistort/images/frame_0000252.jpg.half.jpg",
"022/undistort/images/frame_0000253.jpg.half.jpg",
"022/undistort/images/frame_0000254.jpg.half.jpg",
"022/undistort/images/frame_0000255.jpg.half.jpg",
"022/undistort/images/frame_0000256.jpg.half.jpg",
"022/undistort/images/frame_0000257.jpg.half.jpg",
"022/undistort/images/frame_0000258.jpg.half.jpg",
"022/undistort/images/frame_0000259.jpg.half.jpg",
"022/undistort/images/frame_0000260.jpg.half.jpg",
"022/undistort/images/frame_0000261.jpg.half.jpg",
"022/undistort/images/frame_0000262.jpg.half.jpg",
"022/undistort/images/frame_0000263.jpg.half.jpg",
"022/undistort/images/frame_0000264.jpg.half.jpg",
"022/undistort/images/frame_0000265.jpg.half.jpg",
"022/undistort/images/frame_0000266.jpg.half.jpg",
"022/undistort/images/frame_0000267.jpg.half.jpg",
"022/undistort/images/frame_0000268.jpg.half.jpg",
"022/undistort/images/frame_0000269.jpg.half.jpg",
"022/undistort/images/frame_0000270.jpg.half.jpg",
"022/undistort/images/frame_0000271.jpg.half.jpg",
"022/undistort/images/frame_0000272.jpg.half.jpg",
"022/undistort/images/frame_0000273.jpg.half.jpg",
"022/undistort/images/frame_0000274.jpg.half.jpg",
"022/undistort/images/frame_0000275.jpg.half.jpg",
"022/undistort/images/frame_0000276.jpg.half.jpg",
"022/undistort/images/frame_0000277.jpg.half.jpg",
"022/undistort/images/frame_0000278.jpg.half.jpg",
"022/undistort/images/frame_0000279.jpg.half.jpg",
"022/undistort/images/frame_0000280.jpg.half.jpg",
"022/undistort/images/frame_0000281.jpg.half.jpg",
"022/undistort/images/frame_0000283.jpg.half.jpg",
"022/undistort/images/frame_0000284.jpg.half.jpg",
"022/undistort/images/frame_0000285.jpg.half.jpg",
"022/undistort/images/frame_0000286.jpg.half.jpg",
"022/undistort/images/frame_0000287.jpg.half.jpg",
"022/undistort/images/frame_0000288.jpg.half.jpg",
"022/undistort/images/frame_0000289.jpg.half.jpg",
"022/undistort/images/frame_0000290.jpg.half.jpg",
"022/undistort/images/frame_0000291.jpg.half.jpg",
"022/undistort/images/frame_0000292.jpg.half.jpg",
"022/undistort/images/frame_0000293.jpg.half.jpg",
"022/undistort/images/frame_0000294.jpg.half.jpg",
"022/undistort/images/frame_0000295.jpg.half.jpg",
"022/undistort/images/frame_0000296.jpg.half.jpg",
"022/undistort/images/frame_0000297.jpg.half.jpg",
"022/undistort/images/frame_0000298.jpg.half.jpg",
"022/undistort/images/frame_0000299.jpg.half.jpg",
"022/undistort/images/frame_0000300.jpg.half.jpg",
"022/undistort/images/frame_0000301.jpg.half.jpg",
"022/undistort/images/frame_0000302.jpg.half.jpg",
"022/undistort/images/frame_0000303.jpg.half.jpg",
"022/undistort/images/frame_0000304.jpg.half.jpg",
"022/undistort/images/frame_0000305.jpg.half.jpg",
"022/undistort/images/frame_0000306.jpg.half.jpg",
"022/undistort/images/frame_0000307.jpg.half.jpg",
"022/undistort/images/frame_0000308.jpg.half.jpg",
"022/undistort/images/frame_0000309.jpg.half.jpg",
"022/undistort/images/frame_0000310.jpg.half.jpg",
"022/undistort/images/frame_0000311.jpg.half.jpg",
"022/undistort/images/frame_0000312.jpg.half.jpg",
"022/undistort/images/frame_0000313.jpg.half.jpg",
"022/undistort/images/frame_0000314.jpg.half.jpg",
"022/undistort/images/frame_0000315.jpg.half.jpg",
"022/undistort/images/frame_0000316.jpg.half.jpg",
"022/undistort/images/frame_0000317.jpg.half.jpg",
"022/undistort/images/frame_0000318.jpg.half.jpg",
"022/undistort/images/frame_0000319.jpg.half.jpg",
"022/undistort/images/frame_0000320.jpg.half.jpg",
"022/undistort/images/frame_0000321.jpg.half.jpg",
"022/undistort/images/frame_0000322.jpg.half.jpg",
"022/undistort/images/frame_0000323.jpg.half.jpg",
"022/undistort/images/frame_0000324.jpg.half.jpg",
"022/undistort/images/frame_0000325.jpg.half.jpg",
"022/undistort/images/frame_0000326.jpg.half.jpg",
"022/undistort/images/frame_0000327.jpg.half.jpg",
"022/undistort/images/frame_0000328.jpg.half.jpg",
"022/undistort/images/frame_0000329.jpg.half.jpg",
"022/undistort/images/frame_0000330.jpg.half.jpg",
"022/undistort/images/frame_0000331.jpg.half.jpg",
"022/undistort/images/frame_0000332.jpg.half.jpg",
"022/undistort/images/frame_0000333.jpg.half.jpg",
"022/undistort/images/frame_0000334.jpg.half.jpg",
"022/undistort/images/frame_0000335.jpg.half.jpg",
"022/undistort/images/frame_0000336.jpg.half.jpg",
"022/undistort/images/frame_0000337.jpg.half.jpg",
"022/undistort/images/frame_0000338.jpg.half.jpg",
"022/undistort/images/frame_0000339.jpg.half.jpg",
"022/undistort/images/frame_0000340.jpg.half.jpg",
"022/undistort/images/frame_0000341.jpg.half.jpg",
"022/undistort/images/frame_0000342.jpg.half.jpg",
"022/undistort/images/frame_0000343.jpg.half.jpg",
"022/undistort/images/frame_0000344.jpg.half.jpg",
"022/undistort/images/frame_0000345.jpg.half.jpg",
"022/undistort/images/frame_0000346.jpg.half.jpg",
"022/undistort/images/frame_0000347.jpg.half.jpg",
"022/undistort/images/frame_0000348.jpg.half.jpg",
"022/undistort/images/frame_0000349.jpg.half.jpg",
"022/undistort/images/frame_0000350.jpg.half.jpg",
"022/undistort/images/frame_0000351.jpg.half.jpg",
"022/undistort/images/frame_0000352.jpg.half.jpg",
"022/undistort/images/frame_0000353.jpg.half.jpg",
"034/undistort/images/frame_0000001.jpg.half.jpg",
"034/undistort/images/frame_0000002.jpg.half.jpg",
"034/undistort/images/frame_0000003.jpg.half.jpg",
"034/undistort/images/frame_0000004.jpg.half.jpg",
"034/undistort/images/frame_0000005.jpg.half.jpg",
"034/undistort/images/frame_0000006.jpg.half.jpg",
"034/undistort/images/frame_0000007.jpg.half.jpg",
"034/undistort/images/frame_0000008.jpg.half.jpg",
"034/undistort/images/frame_0000009.jpg.half.jpg",
"034/undistort/images/frame_0000010.jpg.half.jpg",
"034/undistort/images/frame_0000011.jpg.half.jpg",
"034/undistort/images/frame_0000013.jpg.half.jpg",
"034/undistort/images/frame_0000014.jpg.half.jpg",
"034/undistort/images/frame_0000015.jpg.half.jpg",
"034/undistort/images/frame_0000016.jpg.half.jpg",
"034/undistort/images/frame_0000017.jpg.half.jpg",
"034/undistort/images/frame_0000018.jpg.half.jpg",
"034/undistort/images/frame_0000019.jpg.half.jpg",
"034/undistort/images/frame_0000020.jpg.half.jpg",
"034/undistort/images/frame_0000021.jpg.half.jpg",
"034/undistort/images/frame_0000022.jpg.half.jpg",
"034/undistort/images/frame_0000023.jpg.half.jpg",
"034/undistort/images/frame_0000024.jpg.half.jpg",
"034/undistort/images/frame_0000025.jpg.half.jpg",
"034/undistort/images/frame_0000027.jpg.half.jpg",
"034/undistort/images/frame_0000028.jpg.half.jpg",
"034/undistort/images/frame_0000029.jpg.half.jpg",
"034/undistort/images/frame_0000031.jpg.half.jpg",
"034/undistort/images/frame_0000032.jpg.half.jpg",
"034/undistort/images/frame_0000033.jpg.half.jpg",
"034/undistort/images/frame_0000036.jpg.half.jpg",
"034/undistort/images/frame_0000037.jpg.half.jpg",
"034/undistort/images/frame_0000038.jpg.half.jpg",
"034/undistort/images/frame_0000039.jpg.half.jpg",
"034/undistort/images/frame_0000040.jpg.half.jpg",
"034/undistort/images/frame_0000041.jpg.half.jpg",
"034/undistort/images/frame_0000043.jpg.half.jpg",
"034/undistort/images/frame_0000044.jpg.half.jpg",
"034/undistort/images/frame_0000045.jpg.half.jpg",
"034/undistort/images/frame_0000049.jpg.half.jpg",
"034/undistort/images/frame_0000106.jpg.half.jpg",
"034/undistort/images/frame_0000107.jpg.half.jpg",
"034/undistort/images/frame_0000108.jpg.half.jpg",
"034/undistort/images/frame_0000109.jpg.half.jpg",
"034/undistort/images/frame_0000110.jpg.half.jpg",
"034/undistort/images/frame_0000111.jpg.half.jpg",
"034/undistort/images/frame_0000112.jpg.half.jpg",
"034/undistort/images/frame_0000113.jpg.half.jpg",
"034/undistort/images/frame_0000114.jpg.half.jpg",
"034/undistort/images/frame_0000115.jpg.half.jpg",
"034/undistort/images/frame_0000116.jpg.half.jpg",
"034/undistort/images/frame_0000117.jpg.half.jpg",
"034/undistort/images/frame_0000118.jpg.half.jpg",
"034/undistort/images/frame_0000119.jpg.half.jpg",
"034/undistort/images/frame_0000120.jpg.half.jpg",
"034/undistort/images/frame_0000121.jpg.half.jpg",
"034/undistort/images/frame_0000122.jpg.half.jpg",
"034/undistort/images/frame_0000123.jpg.half.jpg",
"034/undistort/images/frame_0000124.jpg.half.jpg",
"034/undistort/images/frame_0000125.jpg.half.jpg",
"034/undistort/images/frame_0000126.jpg.half.jpg",
"034/undistort/images/frame_0000127.jpg.half.jpg",
"034/undistort/images/frame_0000128.jpg.half.jpg",
"034/undistort/images/frame_0000129.jpg.half.jpg",
"034/undistort/images/frame_0000130.jpg.half.jpg",
"034/undistort/images/frame_0000131.jpg.half.jpg",
"034/undistort/images/frame_0000132.jpg.half.jpg",
"034/undistort/images/frame_0000133.jpg.half.jpg",
"034/undistort/images/frame_0000134.jpg.half.jpg",
"034/undistort/images/frame_0000135.jpg.half.jpg",
"034/undistort/images/frame_0000136.jpg.half.jpg",
"034/undistort/images/frame_0000137.jpg.half.jpg",
"034/undistort/images/frame_0000138.jpg.half.jpg",
"034/undistort/images/frame_0000139.jpg.half.jpg",
"034/undistort/images/frame_0000140.jpg.half.jpg",
"034/undistort/images/frame_0000141.jpg.half.jpg",
"034/undistort/images/frame_0000142.jpg.half.jpg",
"034/undistort/images/frame_0000143.jpg.half.jpg",
"034/undistort/images/frame_0000144.jpg.half.jpg",
"034/undistort/images/frame_0000145.jpg.half.jpg",
"034/undistort/images/frame_0000146.jpg.half.jpg",
"034/undistort/images/frame_0000147.jpg.half.jpg",
"034/undistort/images/frame_0000148.jpg.half.jpg",
"034/undistort/images/frame_0000149.jpg.half.jpg",
"034/undistort/images/frame_0000150.jpg.half.jpg",
"034/undistort/images/frame_0000151.jpg.half.jpg",
"034/undistort/images/frame_0000152.jpg.half.jpg",
"034/undistort/images/frame_0000153.jpg.half.jpg",
"034/undistort/images/frame_0000154.jpg.half.jpg",
"034/undistort/images/frame_0000155.jpg.half.jpg",
"034/undistort/images/frame_0000156.jpg.half.jpg",
"034/undistort/images/frame_0000157.jpg.half.jpg",
"034/undistort/images/frame_0000158.jpg.half.jpg",
"034/undistort/images/frame_0000159.jpg.half.jpg",
"034/undistort/images/frame_0000160.jpg.half.jpg",
"034/undistort/images/frame_0000161.jpg.half.jpg",
"034/undistort/images/frame_0000162.jpg.half.jpg",
"034/undistort/images/frame_0000163.jpg.half.jpg",
"034/undistort/images/frame_0000164.jpg.half.jpg",
"034/undistort/images/frame_0000165.jpg.half.jpg",
"034/undistort/images/frame_0000166.jpg.half.jpg",
"034/undistort/images/frame_0000167.jpg.half.jpg",
"034/undistort/images/frame_0000168.jpg.half.jpg",
"034/undistort/images/frame_0000169.jpg.half.jpg",
"034/undistort/images/frame_0000170.jpg.half.jpg",
"034/undistort/images/frame_0000171.jpg.half.jpg",
"034/undistort/images/frame_0000172.jpg.half.jpg",
"034/undistort/images/frame_0000173.jpg.half.jpg",
"034/undistort/images/frame_0000174.jpg.half.jpg",
"034/undistort/images/frame_0000175.jpg.half.jpg",
"034/undistort/images/frame_0000176.jpg.half.jpg",
"034/undistort/images/frame_0000177.jpg.half.jpg",
"034/undistort/images/frame_0000178.jpg.half.jpg",
"034/undistort/images/frame_0000179.jpg.half.jpg",
"034/undistort/images/frame_0000180.jpg.half.jpg",
"034/undistort/images/frame_0000181.jpg.half.jpg",
"034/undistort/images/frame_0000182.jpg.half.jpg",
"034/undistort/images/frame_0000184.jpg.half.jpg",
"034/undistort/images/frame_0000185.jpg.half.jpg",
"034/undistort/images/frame_0000186.jpg.half.jpg",
"034/undistort/images/frame_0000187.jpg.half.jpg",
"034/undistort/images/frame_0000188.jpg.half.jpg",
"034/undistort/images/frame_0000189.jpg.half.jpg",
"034/undistort/images/frame_0000190.jpg.half.jpg",
"034/undistort/images/frame_0000191.jpg.half.jpg",
"034/undistort/images/frame_0000192.jpg.half.jpg",
"034/undistort/images/frame_0000193.jpg.half.jpg",
"034/undistort/images/frame_0000194.jpg.half.jpg",
"034/undistort/images/frame_0000195.jpg.half.jpg",
"034/undistort/images/frame_0000196.jpg.half.jpg",
"034/undistort/images/frame_0000197.jpg.half.jpg",
"034/undistort/images/frame_0000198.jpg.half.jpg",
"034/undistort/images/frame_0000199.jpg.half.jpg",
"034/undistort/images/frame_0000200.jpg.half.jpg",
"034/undistort/images/frame_0000201.jpg.half.jpg",
"034/undistort/images/frame_0000202.jpg.half.jpg",
"034/undistort/images/frame_0000203.jpg.half.jpg",
"034/undistort/images/frame_0000204.jpg.half.jpg",
"034/undistort/images/frame_0000205.jpg.half.jpg",
"034/undistort/images/frame_0000206.jpg.half.jpg",
"034/undistort/images/frame_0000207.jpg.half.jpg",
"034/undistort/images/frame_0000208.jpg.half.jpg",
"034/undistort/images/frame_0000209.jpg.half.jpg",
"034/undistort/images/frame_0000210.jpg.half.jpg",
"034/undistort/images/frame_0000211.jpg.half.jpg",
"034/undistort/images/frame_0000213.jpg.half.jpg",
"034/undistort/images/frame_0000214.jpg.half.jpg",
"034/undistort/images/frame_0000215.jpg.half.jpg",
"034/undistort/images/frame_0000216.jpg.half.jpg",
"034/undistort/images/frame_0000218.jpg.half.jpg",
"034/undistort/images/frame_0000219.jpg.half.jpg",
"034/undistort/images/frame_0000220.jpg.half.jpg",
"034/undistort/images/frame_0000221.jpg.half.jpg",
"034/undistort/images/frame_0000222.jpg.half.jpg",
"034/undistort/images/frame_0000223.jpg.half.jpg",
"034/undistort/images/frame_0000224.jpg.half.jpg",
"034/undistort/images/frame_0000225.jpg.half.jpg",
"034/undistort/images/frame_0000226.jpg.half.jpg",
"034/undistort/images/frame_0000227.jpg.half.jpg",
"034/undistort/images/frame_0000228.jpg.half.jpg",
"034/undistort/images/frame_0000229.jpg.half.jpg",
"034/undistort/images/frame_0000232.jpg.half.jpg",
"034/undistort/images/frame_0000233.jpg.half.jpg",
"034/undistort/images/frame_0000234.jpg.half.jpg",
"034/undistort/images/frame_0000236.jpg.half.jpg",
"034/undistort/images/frame_0000237.jpg.half.jpg",
"034/undistort/images/frame_0000239.jpg.half.jpg",
"034/undistort/images/frame_0000240.jpg.half.jpg",
"034/undistort/images/frame_0000241.jpg.half.jpg",
"034/undistort/images/frame_0000242.jpg.half.jpg",
"034/undistort/images/frame_0000243.jpg.half.jpg",
"034/undistort/images/frame_0000247.jpg.half.jpg",
"034/undistort/images/frame_0000248.jpg.half.jpg",
"034/undistort/images/frame_0000249.jpg.half.jpg",
"034/undistort/images/frame_0000250.jpg.half.jpg",
"034/undistort/images/frame_0000254.jpg.half.jpg",
"034/undistort/images/frame_0000255.jpg.half.jpg",
"034/undistort/images/frame_0000256.jpg.half.jpg",
"034/undistort/images/frame_0000257.jpg.half.jpg",
"034/undistort/images/frame_0000259.jpg.half.jpg",
"034/undistort/images/frame_0000260.jpg.half.jpg",
"034/undistort/images/frame_0000261.jpg.half.jpg",
"034/undistort/images/frame_0000262.jpg.half.jpg",
"034/undistort/images/frame_0000263.jpg.half.jpg",
"034/undistort/images/frame_0000264.jpg.half.jpg",
"034/undistort/images/frame_0000265.jpg.half.jpg",
"034/undistort/images/frame_0000268.jpg.half.jpg",
"036/undistort/images/frame_0000001.jpg.half.jpg",
"036/undistort/images/frame_0000002.jpg.half.jpg",
"036/undistort/images/frame_0000003.jpg.half.jpg",
"036/undistort/images/frame_0000004.jpg.half.jpg",
"036/undistort/images/frame_0000005.jpg.half.jpg",
"036/undistort/images/frame_0000006.jpg.half.jpg",
"036/undistort/images/frame_0000007.jpg.half.jpg",
"036/undistort/images/frame_0000008.jpg.half.jpg",
"036/undistort/images/frame_0000009.jpg.half.jpg",
"036/undistort/images/frame_0000010.jpg.half.jpg",
"036/undistort/images/frame_0000011.jpg.half.jpg",
"036/undistort/images/frame_0000012.jpg.half.jpg",
"036/undistort/images/frame_0000013.jpg.half.jpg",
"036/undistort/images/frame_0000014.jpg.half.jpg",
"036/undistort/images/frame_0000015.jpg.half.jpg",
"036/undistort/images/frame_0000016.jpg.half.jpg",
"036/undistort/images/frame_0000017.jpg.half.jpg",
"036/undistort/images/frame_0000018.jpg.half.jpg",
"036/undistort/images/frame_0000019.jpg.half.jpg",
"036/undistort/images/frame_0000020.jpg.half.jpg",
"036/undistort/images/frame_0000021.jpg.half.jpg",
"036/undistort/images/frame_0000022.jpg.half.jpg",
"036/undistort/images/frame_0000023.jpg.half.jpg",
"036/undistort/images/frame_0000024.jpg.half.jpg",
"036/undistort/images/frame_0000025.jpg.half.jpg",
"036/undistort/images/frame_0000026.jpg.half.jpg",
"036/undistort/images/frame_0000027.jpg.half.jpg",
"036/undistort/images/frame_0000028.jpg.half.jpg",
"036/undistort/images/frame_0000029.jpg.half.jpg",
"036/undistort/images/frame_0000030.jpg.half.jpg",
"036/undistort/images/frame_0000031.jpg.half.jpg",
"036/undistort/images/frame_0000032.jpg.half.jpg",
"036/undistort/images/frame_0000033.jpg.half.jpg",
"036/undistort/images/frame_0000034.jpg.half.jpg",
"036/undistort/images/frame_0000035.jpg.half.jpg",
"036/undistort/images/frame_0000036.jpg.half.jpg",
"036/undistort/images/frame_0000037.jpg.half.jpg",
"036/undistort/images/frame_0000038.jpg.half.jpg",
"036/undistort/images/frame_0000039.jpg.half.jpg",
"036/undistort/images/frame_0000041.jpg.half.jpg",
"036/undistort/images/frame_0000042.jpg.half.jpg",
"036/undistort/images/frame_0000043.jpg.half.jpg",
"036/undistort/images/frame_0000044.jpg.half.jpg",
"036/undistort/images/frame_0000045.jpg.half.jpg",
"036/undistort/images/frame_0000046.jpg.half.jpg",
"036/undistort/images/frame_0000047.jpg.half.jpg",
"036/undistort/images/frame_0000048.jpg.half.jpg",
"036/undistort/images/frame_0000049.jpg.half.jpg",
"036/undistort/images/frame_0000050.jpg.half.jpg",
"036/undistort/images/frame_0000051.jpg.half.jpg",
"036/undistort/images/frame_0000052.jpg.half.jpg",
"036/undistort/images/frame_0000053.jpg.half.jpg",
"036/undistort/images/frame_0000054.jpg.half.jpg",
"036/undistort/images/frame_0000055.jpg.half.jpg",
"036/undistort/images/frame_0000056.jpg.half.jpg",
"036/undistort/images/frame_0000057.jpg.half.jpg",
"036/undistort/images/frame_0000058.jpg.half.jpg",
"036/undistort/images/frame_0000059.jpg.half.jpg",
"036/undistort/images/frame_0000060.jpg.half.jpg",
"036/undistort/images/frame_0000061.jpg.half.jpg",
"036/undistort/images/frame_0000062.jpg.half.jpg",
"036/undistort/images/frame_0000063.jpg.half.jpg",
"036/undistort/images/frame_0000064.jpg.half.jpg",
"036/undistort/images/frame_0000065.jpg.half.jpg",
"036/undistort/images/frame_0000066.jpg.half.jpg",
"036/undistort/images/frame_0000067.jpg.half.jpg",
"036/undistort/images/frame_0000068.jpg.half.jpg",
"036/undistort/images/frame_0000069.jpg.half.jpg",
"036/undistort/images/frame_0000070.jpg.half.jpg",
"036/undistort/images/frame_0000071.jpg.half.jpg",
"036/undistort/images/frame_0000072.jpg.half.jpg",
"036/undistort/images/frame_0000073.jpg.half.jpg",
"036/undistort/images/frame_0000074.jpg.half.jpg",
"036/undistort/images/frame_0000075.jpg.half.jpg",
"036/undistort/images/frame_0000076.jpg.half.jpg",
"036/undistort/images/frame_0000077.jpg.half.jpg",
"036/undistort/images/frame_0000078.jpg.half.jpg",
"036/undistort/images/frame_0000079.jpg.half.jpg",
"036/undistort/images/frame_0000080.jpg.half.jpg",
"036/undistort/images/frame_0000081.jpg.half.jpg",
"036/undistort/images/frame_0000082.jpg.half.jpg",
"036/undistort/images/frame_0000083.jpg.half.jpg",
"036/undistort/images/frame_0000084.jpg.half.jpg",
"036/undistort/images/frame_0000085.jpg.half.jpg",
"036/undistort/images/frame_0000086.jpg.half.jpg",
"036/undistort/images/frame_0000087.jpg.half.jpg",
"036/undistort/images/frame_0000088.jpg.half.jpg",
"036/undistort/images/frame_0000089.jpg.half.jpg",
"036/undistort/images/frame_0000090.jpg.half.jpg",
"036/undistort/images/frame_0000091.jpg.half.jpg",
"036/undistort/images/frame_0000092.jpg.half.jpg",
"036/undistort/images/frame_0000093.jpg.half.jpg",
"036/undistort/images/frame_0000095.jpg.half.jpg",
"036/undistort/images/frame_0000096.jpg.half.jpg",
"036/undistort/images/frame_0000097.jpg.half.jpg",
"036/undistort/images/frame_0000098.jpg.half.jpg",
"036/undistort/images/frame_0000099.jpg.half.jpg",
"036/undistort/images/frame_0000100.jpg.half.jpg",
"036/undistort/images/frame_0000101.jpg.half.jpg",
"036/undistort/images/frame_0000102.jpg.half.jpg",
"036/undistort/images/frame_0000103.jpg.half.jpg",
"036/undistort/images/frame_0000104.jpg.half.jpg",
"036/undistort/images/frame_0000105.jpg.half.jpg",
"036/undistort/images/frame_0000106.jpg.half.jpg",
"036/undistort/images/frame_0000107.jpg.half.jpg",
"036/undistort/images/frame_0000108.jpg.half.jpg",
"036/undistort/images/frame_0000109.jpg.half.jpg",
"036/undistort/images/frame_0000110.jpg.half.jpg",
"036/undistort/images/frame_0000111.jpg.half.jpg",
"036/undistort/images/frame_0000112.jpg.half.jpg",
"036/undistort/images/frame_0000113.jpg.half.jpg",
"036/undistort/images/frame_0000114.jpg.half.jpg",
"036/undistort/images/frame_0000115.jpg.half.jpg",
"036/undistort/images/frame_0000116.jpg.half.jpg",
"036/undistort/images/frame_0000117.jpg.half.jpg",
"036/undistort/images/frame_0000118.jpg.half.jpg",
"036/undistort/images/frame_0000121.jpg.half.jpg",
"036/undistort/images/frame_0000122.jpg.half.jpg",
"036/undistort/images/frame_0000123.jpg.half.jpg",
"036/undistort/images/frame_0000124.jpg.half.jpg",
"036/undistort/images/frame_0000125.jpg.half.jpg",
"036/undistort/images/frame_0000126.jpg.half.jpg",
"036/undistort/images/frame_0000127.jpg.half.jpg",
"036/undistort/images/frame_0000128.jpg.half.jpg",
"036/undistort/images/frame_0000129.jpg.half.jpg",
"036/undistort/images/frame_0000130.jpg.half.jpg",
"036/undistort/images/frame_0000131.jpg.half.jpg",
"036/undistort/images/frame_0000132.jpg.half.jpg",
"036/undistort/images/frame_0000133.jpg.half.jpg",
"036/undistort/images/frame_0000134.jpg.half.jpg",
"036/undistort/images/frame_0000135.jpg.half.jpg",
"036/undistort/images/frame_0000136.jpg.half.jpg",
"036/undistort/images/frame_0000137.jpg.half.jpg",
"036/undistort/images/frame_0000138.jpg.half.jpg",
"036/undistort/images/frame_0000139.jpg.half.jpg",
"036/undistort/images/frame_0000140.jpg.half.jpg",
"036/undistort/images/frame_0000141.jpg.half.jpg",
"036/undistort/images/frame_0000142.jpg.half.jpg",
"036/undistort/images/frame_0000143.jpg.half.jpg",
"036/undistort/images/frame_0000144.jpg.half.jpg",
"036/undistort/images/frame_0000145.jpg.half.jpg",
"036/undistort/images/frame_0000146.jpg.half.jpg",
"036/undistort/images/frame_0000147.jpg.half.jpg",
"036/undistort/images/frame_0000148.jpg.half.jpg",
"036/undistort/images/frame_0000149.jpg.half.jpg",
"036/undistort/images/frame_0000150.jpg.half.jpg",
"036/undistort/images/frame_0000151.jpg.half.jpg",
"036/undistort/images/frame_0000152.jpg.half.jpg",
"036/undistort/images/frame_0000153.jpg.half.jpg",
"036/undistort/images/frame_0000154.jpg.half.jpg",
"036/undistort/images/frame_0000155.jpg.half.jpg",
"036/undistort/images/frame_0000156.jpg.half.jpg",
"036/undistort/images/frame_0000157.jpg.half.jpg",
"036/undistort/images/frame_0000158.jpg.half.jpg",
"036/undistort/images/frame_0000159.jpg.half.jpg",
"036/undistort/images/frame_0000160.jpg.half.jpg",
"036/undistort/images/frame_0000161.jpg.half.jpg",
"036/undistort/images/frame_0000162.jpg.half.jpg",
"036/undistort/images/frame_0000163.jpg.half.jpg",
"036/undistort/images/frame_0000164.jpg.half.jpg",
"036/undistort/images/frame_0000165.jpg.half.jpg",
"036/undistort/images/frame_0000166.jpg.half.jpg",
"036/undistort/images/frame_0000167.jpg.half.jpg",
"036/undistort/images/frame_0000168.jpg.half.jpg",
"036/undistort/images/frame_0000169.jpg.half.jpg",
"036/undistort/images/frame_0000170.jpg.half.jpg",
"036/undistort/images/frame_0000171.jpg.half.jpg",
"036/undistort/images/frame_0000172.jpg.half.jpg",
"036/undistort/images/frame_0000173.jpg.half.jpg",
"036/undistort/images/frame_0000174.jpg.half.jpg",
"036/undistort/images/frame_0000175.jpg.half.jpg",
"036/undistort/images/frame_0000176.jpg.half.jpg",
"036/undistort/images/frame_0000177.jpg.half.jpg",
"036/undistort/images/frame_0000178.jpg.half.jpg",
"036/undistort/images/frame_0000179.jpg.half.jpg",
"036/undistort/images/frame_0000180.jpg.half.jpg",
"036/undistort/images/frame_0000181.jpg.half.jpg",
"036/undistort/images/frame_0000182.jpg.half.jpg",
"036/undistort/images/frame_0000183.jpg.half.jpg",
"036/undistort/images/frame_0000184.jpg.half.jpg",
"036/undistort/images/frame_0000185.jpg.half.jpg",
"036/undistort/images/frame_0000186.jpg.half.jpg",
"036/undistort/images/frame_0000187.jpg.half.jpg",
"036/undistort/images/frame_0000188.jpg.half.jpg",
"036/undistort/images/frame_0000189.jpg.half.jpg",
"036/undistort/images/frame_0000190.jpg.half.jpg",
"036/undistort/images/frame_0000191.jpg.half.jpg",
"036/undistort/images/frame_0000192.jpg.half.jpg",
"036/undistort/images/frame_0000193.jpg.half.jpg",
"036/undistort/images/frame_0000194.jpg.half.jpg",
"036/undistort/images/frame_0000195.jpg.half.jpg",
"036/undistort/images/frame_0000196.jpg.half.jpg",
"036/undistort/images/frame_0000197.jpg.half.jpg",
"036/undistort/images/frame_0000198.jpg.half.jpg",
"036/undistort/images/frame_0000199.jpg.half.jpg",
"036/undistort/images/frame_0000200.jpg.half.jpg",
"036/undistort/images/frame_0000201.jpg.half.jpg",
"036/undistort/images/frame_0000202.jpg.half.jpg",
"036/undistort/images/frame_0000203.jpg.half.jpg",
"036/undistort/images/frame_0000204.jpg.half.jpg",
"036/undistort/images/frame_0000205.jpg.half.jpg",
"036/undistort/images/frame_0000206.jpg.half.jpg",
"036/undistort/images/frame_0000207.jpg.half.jpg",
"036/undistort/images/frame_0000208.jpg.half.jpg",
"036/undistort/images/frame_0000209.jpg.half.jpg",
"036/undistort/images/frame_0000210.jpg.half.jpg",
"036/undistort/images/frame_0000211.jpg.half.jpg",
"036/undistort/images/frame_0000212.jpg.half.jpg",
"036/undistort/images/frame_0000213.jpg.half.jpg",
"036/undistort/images/frame_0000214.jpg.half.jpg",
"036/undistort/images/frame_0000215.jpg.half.jpg",
"036/undistort/images/frame_0000216.jpg.half.jpg",
"036/undistort/images/frame_0000217.jpg.half.jpg",
"036/undistort/images/frame_0000218.jpg.half.jpg",
"036/undistort/images/frame_0000219.jpg.half.jpg",
"036/undistort/images/frame_0000220.jpg.half.jpg",
"036/undistort/images/frame_0000221.jpg.half.jpg",
"036/undistort/images/frame_0000222.jpg.half.jpg",
"036/undistort/images/frame_0000223.jpg.half.jpg",
"036/undistort/images/frame_0000224.jpg.half.jpg",
"036/undistort/images/frame_0000225.jpg.half.jpg",
"036/undistort/images/frame_0000226.jpg.half.jpg",
"036/undistort/images/frame_0000227.jpg.half.jpg",
"036/undistort/images/frame_0000228.jpg.half.jpg",
"036/undistort/images/frame_0000229.jpg.half.jpg",
"036/undistort/images/frame_0000230.jpg.half.jpg",
"036/undistort/images/frame_0000231.jpg.half.jpg",
"036/undistort/images/frame_0000232.jpg.half.jpg",
"036/undistort/images/frame_0000233.jpg.half.jpg",
"036/undistort/images/frame_0000234.jpg.half.jpg",
"036/undistort/images/frame_0000235.jpg.half.jpg",
"036/undistort/images/frame_0000236.jpg.half.jpg",
"036/undistort/images/frame_0000237.jpg.half.jpg",
"036/undistort/images/frame_0000238.jpg.half.jpg",
"036/undistort/images/frame_0000239.jpg.half.jpg",
"036/undistort/images/frame_0000240.jpg.half.jpg",
"036/undistort/images/frame_0000241.jpg.half.jpg",
"036/undistort/images/frame_0000242.jpg.half.jpg",
"036/undistort/images/frame_0000243.jpg.half.jpg",
"036/undistort/images/frame_0000244.jpg.half.jpg",
"036/undistort/images/frame_0000245.jpg.half.jpg",
"036/undistort/images/frame_0000246.jpg.half.jpg",
"036/undistort/images/frame_0000247.jpg.half.jpg",
"036/undistort/images/frame_0000248.jpg.half.jpg",
"036/undistort/images/frame_0000249.jpg.half.jpg",
"036/undistort/images/frame_0000250.jpg.half.jpg",
"036/undistort/images/frame_0000251.jpg.half.jpg",
"036/undistort/images/frame_0000252.jpg.half.jpg",
"036/undistort/images/frame_0000253.jpg.half.jpg",
"036/undistort/images/frame_0000254.jpg.half.jpg",
"036/undistort/images/frame_0000255.jpg.half.jpg",
"036/undistort/images/frame_0000256.jpg.half.jpg",
"036/undistort/images/frame_0000257.jpg.half.jpg",
"036/undistort/images/frame_0000258.jpg.half.jpg",
"036/undistort/images/frame_0000259.jpg.half.jpg",
"036/undistort/images/frame_0000260.jpg.half.jpg",
"036/undistort/images/frame_0000261.jpg.half.jpg",
"036/undistort/images/frame_0000262.jpg.half.jpg",
"036/undistort/images/frame_0000263.jpg.half.jpg",
"036/undistort/images/frame_0000264.jpg.half.jpg",
"036/undistort/images/frame_0000265.jpg.half.jpg",
"036/undistort/images/frame_0000266.jpg.half.jpg",
"036/undistort/images/frame_0000267.jpg.half.jpg",
"036/undistort/images/frame_0000268.jpg.half.jpg",
"036/undistort/images/frame_0000269.jpg.half.jpg",
"036/undistort/images/frame_0000270.jpg.half.jpg",
"036/undistort/images/frame_0000271.jpg.half.jpg",
"036/undistort/images/frame_0000272.jpg.half.jpg",
"036/undistort/images/frame_0000273.jpg.half.jpg",
"036/undistort/images/frame_0000274.jpg.half.jpg",
"036/undistort/images/frame_0000275.jpg.half.jpg",
"036/undistort/images/frame_0000276.jpg.half.jpg",
"036/undistort/images/frame_0000277.jpg.half.jpg",
"036/undistort/images/frame_0000278.jpg.half.jpg",
"036/undistort/images/frame_0000279.jpg.half.jpg",
"036/undistort/images/frame_0000280.jpg.half.jpg",
"036/undistort/images/frame_0000281.jpg.half.jpg",
"036/undistort/images/frame_0000282.jpg.half.jpg",
"036/undistort/images/frame_0000283.jpg.half.jpg",
"036/undistort/images/frame_0000284.jpg.half.jpg",
"036/undistort/images/frame_0000285.jpg.half.jpg",
"036/undistort/images/frame_0000286.jpg.half.jpg",
"036/undistort/images/frame_0000287.jpg.half.jpg",
"036/undistort/images/frame_0000288.jpg.half.jpg",
"036/undistort/images/frame_0000289.jpg.half.jpg",
"036/undistort/images/frame_0000290.jpg.half.jpg",
"036/undistort/images/frame_0000291.jpg.half.jpg",
"036/undistort/images/frame_0000292.jpg.half.jpg",
"036/undistort/images/frame_0000293.jpg.half.jpg",
"036/undistort/images/frame_0000294.jpg.half.jpg",
"036/undistort/images/frame_0000295.jpg.half.jpg",
"036/undistort/images/frame_0000296.jpg.half.jpg",
"036/undistort/images/frame_0000297.jpg.half.jpg",
"036/undistort/images/frame_0000298.jpg.half.jpg",
"036/undistort/images/frame_0000299.jpg.half.jpg",
"036/undistort/images/frame_0000300.jpg.half.jpg",
"036/undistort/images/frame_0000301.jpg.half.jpg",
"036/undistort/images/frame_0000302.jpg.half.jpg",
"036/undistort/images/frame_0000303.jpg.half.jpg",
"036/undistort/images/frame_0000304.jpg.half.jpg",
"036/undistort/images/frame_0000305.jpg.half.jpg",
"036/undistort/images/frame_0000306.jpg.half.jpg",
"036/undistort/images/frame_0000307.jpg.half.jpg",
"036/undistort/images/frame_0000308.jpg.half.jpg",
"036/undistort/images/frame_0000309.jpg.half.jpg",
"036/undistort/images/frame_0000310.jpg.half.jpg",
"036/undistort/images/frame_0000311.jpg.half.jpg",
"036/undistort/images/frame_0000312.jpg.half.jpg",
"036/undistort/images/frame_0000313.jpg.half.jpg",
"036/undistort/images/frame_0000314.jpg.half.jpg",
"036/undistort/images/frame_0000315.jpg.half.jpg",
"036/undistort/images/frame_0000316.jpg.half.jpg",
"036/undistort/images/frame_0000317.jpg.half.jpg",
"036/undistort/images/frame_0000318.jpg.half.jpg",
"036/undistort/images/frame_0000319.jpg.half.jpg",
"036/undistort/images/frame_0000320.jpg.half.jpg",
"036/undistort/images/frame_0000321.jpg.half.jpg",
"036/undistort/images/frame_0000322.jpg.half.jpg",
"036/undistort/images/frame_0000323.jpg.half.jpg",
"036/undistort/images/frame_0000324.jpg.half.jpg",
"036/undistort/images/frame_0000325.jpg.half.jpg",
"036/undistort/images/frame_0000326.jpg.half.jpg",
"036/undistort/images/frame_0000327.jpg.half.jpg",
"036/undistort/images/frame_0000328.jpg.half.jpg",
"036/undistort/images/frame_0000329.jpg.half.jpg",
"036/undistort/images/frame_0000330.jpg.half.jpg",
"036/undistort/images/frame_0000331.jpg.half.jpg",
"036/undistort/images/frame_0000332.jpg.half.jpg",
"036/undistort/images/frame_0000334.jpg.half.jpg",
"036/undistort/images/frame_0000335.jpg.half.jpg",
"036/undistort/images/frame_0000336.jpg.half.jpg",
"036/undistort/images/frame_0000337.jpg.half.jpg",
"036/undistort/images/frame_0000338.jpg.half.jpg",
"036/undistort/images/frame_0000339.jpg.half.jpg",
"036/undistort/images/frame_0000340.jpg.half.jpg",
"036/undistort/images/frame_0000341.jpg.half.jpg",
"036/undistort/images/frame_0000342.jpg.half.jpg",
"036/undistort/images/frame_0000343.jpg.half.jpg",
"036/undistort/images/frame_0000344.jpg.half.jpg",
"036/undistort/images/frame_0000345.jpg.half.jpg",
"036/undistort/images/frame_0000346.jpg.half.jpg",
"036/undistort/images/frame_0000347.jpg.half.jpg",
"036/undistort/images/frame_0000348.jpg.half.jpg",
"036/undistort/images/frame_0000349.jpg.half.jpg",
"036/undistort/images/frame_0000350.jpg.half.jpg",
"036/undistort/images/frame_0000351.jpg.half.jpg",
"036/undistort/images/frame_0000352.jpg.half.jpg",
"036/undistort/images/frame_0000353.jpg.half.jpg",
"036/undistort/images/frame_0000354.jpg.half.jpg",
"036/undistort/images/frame_0000355.jpg.half.jpg",
"036/undistort/images/frame_0000356.jpg.half.jpg",
"036/undistort/images/frame_0000357.jpg.half.jpg",
"036/undistort/images/frame_0000358.jpg.half.jpg",
"036/undistort/images/frame_0000359.jpg.half.jpg",
"036/undistort/images/frame_0000360.jpg.half.jpg",
"036/undistort/images/frame_0000361.jpg.half.jpg",
"036/undistort/images/frame_0000362.jpg.half.jpg",
"036/undistort/images/frame_0000363.jpg.half.jpg",
"036/undistort/images/frame_0000364.jpg.half.jpg",
"036/undistort/images/frame_0000365.jpg.half.jpg",
"036/undistort/images/frame_0000366.jpg.half.jpg",
"036/undistort/images/frame_0000367.jpg.half.jpg",
"036/undistort/images/frame_0000368.jpg.half.jpg",
"036/undistort/images/frame_0000369.jpg.half.jpg",
"036/undistort/images/frame_0000370.jpg.half.jpg",
"036/undistort/images/frame_0000371.jpg.half.jpg",
"036/undistort/images/frame_0000372.jpg.half.jpg",
"037/undistort/images/frame_0000001.jpg.half.jpg",
"037/undistort/images/frame_0000002.jpg.half.jpg",
"037/undistort/images/frame_0000003.jpg.half.jpg",
"037/undistort/images/frame_0000004.jpg.half.jpg",
"037/undistort/images/frame_0000005.jpg.half.jpg",
"037/undistort/images/frame_0000006.jpg.half.jpg",
"037/undistort/images/frame_0000007.jpg.half.jpg",
"037/undistort/images/frame_0000008.jpg.half.jpg",
"037/undistort/images/frame_0000009.jpg.half.jpg",
"037/undistort/images/frame_0000010.jpg.half.jpg",
"037/undistort/images/frame_0000011.jpg.half.jpg",
"037/undistort/images/frame_0000012.jpg.half.jpg",
"037/undistort/images/frame_0000013.jpg.half.jpg",
"037/undistort/images/frame_0000014.jpg.half.jpg",
"037/undistort/images/frame_0000015.jpg.half.jpg",
"037/undistort/images/frame_0000016.jpg.half.jpg",
"037/undistort/images/frame_0000017.jpg.half.jpg",
"037/undistort/images/frame_0000018.jpg.half.jpg",
"037/undistort/images/frame_0000019.jpg.half.jpg",
"037/undistort/images/frame_0000020.jpg.half.jpg",
"037/undistort/images/frame_0000021.jpg.half.jpg",
"037/undistort/images/frame_0000022.jpg.half.jpg",
"037/undistort/images/frame_0000023.jpg.half.jpg",
"037/undistort/images/frame_0000024.jpg.half.jpg",
"037/undistort/images/frame_0000025.jpg.half.jpg",
"037/undistort/images/frame_0000026.jpg.half.jpg",
"037/undistort/images/frame_0000027.jpg.half.jpg",
"037/undistort/images/frame_0000028.jpg.half.jpg",
"037/undistort/images/frame_0000029.jpg.half.jpg",
"037/undistort/images/frame_0000030.jpg.half.jpg",
"037/undistort/images/frame_0000031.jpg.half.jpg",
"037/undistort/images/frame_0000032.jpg.half.jpg",
"037/undistort/images/frame_0000033.jpg.half.jpg",
"037/undistort/images/frame_0000034.jpg.half.jpg",
"037/undistort/images/frame_0000035.jpg.half.jpg",
"037/undistort/images/frame_0000036.jpg.half.jpg",
"037/undistort/images/frame_0000037.jpg.half.jpg",
"037/undistort/images/frame_0000038.jpg.half.jpg",
"037/undistort/images/frame_0000039.jpg.half.jpg",
"037/undistort/images/frame_0000040.jpg.half.jpg",
"037/undistort/images/frame_0000041.jpg.half.jpg",
"037/undistort/images/frame_0000042.jpg.half.jpg",
"037/undistort/images/frame_0000043.jpg.half.jpg",
"037/undistort/images/frame_0000044.jpg.half.jpg",
"037/undistort/images/frame_0000045.jpg.half.jpg",
"037/undistort/images/frame_0000046.jpg.half.jpg",
"037/undistort/images/frame_0000047.jpg.half.jpg",
"037/undistort/images/frame_0000048.jpg.half.jpg",
"037/undistort/images/frame_0000049.jpg.half.jpg",
"037/undistort/images/frame_0000050.jpg.half.jpg",
"037/undistort/images/frame_0000051.jpg.half.jpg",
"037/undistort/images/frame_0000052.jpg.half.jpg",
"037/undistort/images/frame_0000053.jpg.half.jpg",
"037/undistort/images/frame_0000054.jpg.half.jpg",
"037/undistort/images/frame_0000055.jpg.half.jpg",
"037/undistort/images/frame_0000056.jpg.half.jpg",
"037/undistort/images/frame_0000057.jpg.half.jpg",
"037/undistort/images/frame_0000058.jpg.half.jpg",
"037/undistort/images/frame_0000059.jpg.half.jpg",
"037/undistort/images/frame_0000060.jpg.half.jpg",
"037/undistort/images/frame_0000061.jpg.half.jpg",
"037/undistort/images/frame_0000062.jpg.half.jpg",
"037/undistort/images/frame_0000063.jpg.half.jpg",
"037/undistort/images/frame_0000064.jpg.half.jpg",
"037/undistort/images/frame_0000065.jpg.half.jpg",
"037/undistort/images/frame_0000066.jpg.half.jpg",
"037/undistort/images/frame_0000067.jpg.half.jpg",
"037/undistort/images/frame_0000068.jpg.half.jpg",
"037/undistort/images/frame_0000069.jpg.half.jpg",
"037/undistort/images/frame_0000070.jpg.half.jpg",
"037/undistort/images/frame_0000071.jpg.half.jpg",
"037/undistort/images/frame_0000072.jpg.half.jpg",
"037/undistort/images/frame_0000073.jpg.half.jpg",
"037/undistort/images/frame_0000074.jpg.half.jpg",
"037/undistort/images/frame_0000075.jpg.half.jpg",
"037/undistort/images/frame_0000076.jpg.half.jpg",
"037/undistort/images/frame_0000077.jpg.half.jpg",
"037/undistort/images/frame_0000078.jpg.half.jpg",
"037/undistort/images/frame_0000079.jpg.half.jpg",
"037/undistort/images/frame_0000080.jpg.half.jpg",
"037/undistort/images/frame_0000081.jpg.half.jpg",
"037/undistort/images/frame_0000082.jpg.half.jpg",
"037/undistort/images/frame_0000083.jpg.half.jpg",
"037/undistort/images/frame_0000084.jpg.half.jpg",
"037/undistort/images/frame_0000085.jpg.half.jpg",
"037/undistort/images/frame_0000086.jpg.half.jpg",
"037/undistort/images/frame_0000087.jpg.half.jpg",
"037/undistort/images/frame_0000088.jpg.half.jpg",
"037/undistort/images/frame_0000089.jpg.half.jpg",
"037/undistort/images/frame_0000090.jpg.half.jpg",
"037/undistort/images/frame_0000091.jpg.half.jpg",
"037/undistort/images/frame_0000092.jpg.half.jpg",
"037/undistort/images/frame_0000093.jpg.half.jpg",
"037/undistort/images/frame_0000094.jpg.half.jpg",
"037/undistort/images/frame_0000095.jpg.half.jpg",
"037/undistort/images/frame_0000096.jpg.half.jpg",
"037/undistort/images/frame_0000097.jpg.half.jpg",
"037/undistort/images/frame_0000098.jpg.half.jpg",
"037/undistort/images/frame_0000099.jpg.half.jpg",
"037/undistort/images/frame_0000100.jpg.half.jpg",
"037/undistort/images/frame_0000101.jpg.half.jpg",
"037/undistort/images/frame_0000102.jpg.half.jpg",
"037/undistort/images/frame_0000103.jpg.half.jpg",
"037/undistort/images/frame_0000104.jpg.half.jpg",
"037/undistort/images/frame_0000105.jpg.half.jpg",
"037/undistort/images/frame_0000106.jpg.half.jpg",
"037/undistort/images/frame_0000107.jpg.half.jpg",
"037/undistort/images/frame_0000108.jpg.half.jpg",
"037/undistort/images/frame_0000109.jpg.half.jpg",
"037/undistort/images/frame_0000110.jpg.half.jpg",
"037/undistort/images/frame_0000111.jpg.half.jpg",
"037/undistort/images/frame_0000112.jpg.half.jpg",
"037/undistort/images/frame_0000113.jpg.half.jpg",
"037/undistort/images/frame_0000114.jpg.half.jpg",
"037/undistort/images/frame_0000115.jpg.half.jpg",
"037/undistort/images/frame_0000116.jpg.half.jpg",
"037/undistort/images/frame_0000117.jpg.half.jpg",
"037/undistort/images/frame_0000118.jpg.half.jpg",
"037/undistort/images/frame_0000119.jpg.half.jpg",
"037/undistort/images/frame_0000120.jpg.half.jpg",
"037/undistort/images/frame_0000121.jpg.half.jpg",
"037/undistort/images/frame_0000122.jpg.half.jpg",
"037/undistort/images/frame_0000123.jpg.half.jpg",
"037/undistort/images/frame_0000124.jpg.half.jpg",
"037/undistort/images/frame_0000125.jpg.half.jpg",
"037/undistort/images/frame_0000126.jpg.half.jpg",
"037/undistort/images/frame_0000127.jpg.half.jpg",
"037/undistort/images/frame_0000128.jpg.half.jpg",
"037/undistort/images/frame_0000129.jpg.half.jpg",
"037/undistort/images/frame_0000130.jpg.half.jpg",
"037/undistort/images/frame_0000131.jpg.half.jpg",
"037/undistort/images/frame_0000132.jpg.half.jpg",
"037/undistort/images/frame_0000133.jpg.half.jpg",
"037/undistort/images/frame_0000134.jpg.half.jpg",
"037/undistort/images/frame_0000135.jpg.half.jpg",
"037/undistort/images/frame_0000136.jpg.half.jpg",
"037/undistort/images/frame_0000137.jpg.half.jpg",
"037/undistort/images/frame_0000138.jpg.half.jpg",
"037/undistort/images/frame_0000139.jpg.half.jpg",
"037/undistort/images/frame_0000140.jpg.half.jpg",
"037/undistort/images/frame_0000141.jpg.half.jpg",
"037/undistort/images/frame_0000142.jpg.half.jpg",
"037/undistort/images/frame_0000143.jpg.half.jpg",
"037/undistort/images/frame_0000144.jpg.half.jpg",
"037/undistort/images/frame_0000145.jpg.half.jpg",
"037/undistort/images/frame_0000146.jpg.half.jpg",
"037/undistort/images/frame_0000147.jpg.half.jpg",
"037/undistort/images/frame_0000148.jpg.half.jpg",
"037/undistort/images/frame_0000149.jpg.half.jpg",
"037/undistort/images/frame_0000150.jpg.half.jpg",
"037/undistort/images/frame_0000151.jpg.half.jpg",
"037/undistort/images/frame_0000152.jpg.half.jpg",
"037/undistort/images/frame_0000153.jpg.half.jpg",
"037/undistort/images/frame_0000154.jpg.half.jpg",
"037/undistort/images/frame_0000155.jpg.half.jpg",
"037/undistort/images/frame_0000156.jpg.half.jpg",
"037/undistort/images/frame_0000157.jpg.half.jpg",
"037/undistort/images/frame_0000158.jpg.half.jpg",
"037/undistort/images/frame_0000159.jpg.half.jpg",
"037/undistort/images/frame_0000160.jpg.half.jpg",
"037/undistort/images/frame_0000161.jpg.half.jpg",
"037/undistort/images/frame_0000162.jpg.half.jpg",
"037/undistort/images/frame_0000163.jpg.half.jpg",
"037/undistort/images/frame_0000164.jpg.half.jpg",
"037/undistort/images/frame_0000165.jpg.half.jpg",
"037/undistort/images/frame_0000166.jpg.half.jpg",
"037/undistort/images/frame_0000167.jpg.half.jpg",
"037/undistort/images/frame_0000168.jpg.half.jpg",
"037/undistort/images/frame_0000169.jpg.half.jpg",
"037/undistort/images/frame_0000170.jpg.half.jpg",
"037/undistort/images/frame_0000171.jpg.half.jpg",
"037/undistort/images/frame_0000172.jpg.half.jpg",
"037/undistort/images/frame_0000173.jpg.half.jpg",
"037/undistort/images/frame_0000174.jpg.half.jpg",
"037/undistort/images/frame_0000175.jpg.half.jpg",
"037/undistort/images/frame_0000176.jpg.half.jpg",
"037/undistort/images/frame_0000177.jpg.half.jpg",
"037/undistort/images/frame_0000178.jpg.half.jpg",
"037/undistort/images/frame_0000179.jpg.half.jpg",
"037/undistort/images/frame_0000180.jpg.half.jpg",
"037/undistort/images/frame_0000181.jpg.half.jpg",
"037/undistort/images/frame_0000182.jpg.half.jpg",
"037/undistort/images/frame_0000183.jpg.half.jpg",
"037/undistort/images/frame_0000184.jpg.half.jpg",
"037/undistort/images/frame_0000185.jpg.half.jpg",
"037/undistort/images/frame_0000186.jpg.half.jpg",
"037/undistort/images/frame_0000187.jpg.half.jpg",
"037/undistort/images/frame_0000188.jpg.half.jpg",
"037/undistort/images/frame_0000189.jpg.half.jpg",
"037/undistort/images/frame_0000190.jpg.half.jpg",
"037/undistort/images/frame_0000191.jpg.half.jpg",
"037/undistort/images/frame_0000192.jpg.half.jpg",
"037/undistort/images/frame_0000193.jpg.half.jpg",
"037/undistort/images/frame_0000194.jpg.half.jpg",
"037/undistort/images/frame_0000195.jpg.half.jpg",
"037/undistort/images/frame_0000196.jpg.half.jpg",
"037/undistort/images/frame_0000197.jpg.half.jpg",
"037/undistort/images/frame_0000198.jpg.half.jpg",
"037/undistort/images/frame_0000199.jpg.half.jpg",
"037/undistort/images/frame_0000200.jpg.half.jpg",
"037/undistort/images/frame_0000201.jpg.half.jpg",
"037/undistort/images/frame_0000202.jpg.half.jpg",
"037/undistort/images/frame_0000203.jpg.half.jpg",
"037/undistort/images/frame_0000204.jpg.half.jpg",
"037/undistort/images/frame_0000205.jpg.half.jpg",
"037/undistort/images/frame_0000206.jpg.half.jpg",
"037/undistort/images/frame_0000207.jpg.half.jpg",
"037/undistort/images/frame_0000208.jpg.half.jpg",
"037/undistort/images/frame_0000209.jpg.half.jpg",
"037/undistort/images/frame_0000210.jpg.half.jpg",
"037/undistort/images/frame_0000211.jpg.half.jpg",
"037/undistort/images/frame_0000212.jpg.half.jpg",
"037/undistort/images/frame_0000213.jpg.half.jpg",
"037/undistort/images/frame_0000214.jpg.half.jpg",
"037/undistort/images/frame_0000215.jpg.half.jpg",
"037/undistort/images/frame_0000216.jpg.half.jpg",
"037/undistort/images/frame_0000217.jpg.half.jpg",
"037/undistort/images/frame_0000218.jpg.half.jpg",
"037/undistort/images/frame_0000219.jpg.half.jpg",
"037/undistort/images/frame_0000220.jpg.half.jpg",
"037/undistort/images/frame_0000221.jpg.half.jpg",
"037/undistort/images/frame_0000222.jpg.half.jpg",
"037/undistort/images/frame_0000223.jpg.half.jpg",
"037/undistort/images/frame_0000224.jpg.half.jpg",
"037/undistort/images/frame_0000225.jpg.half.jpg",
"037/undistort/images/frame_0000226.jpg.half.jpg",
"037/undistort/images/frame_0000227.jpg.half.jpg",
"037/undistort/images/frame_0000228.jpg.half.jpg",
"037/undistort/images/frame_0000229.jpg.half.jpg",
"037/undistort/images/frame_0000230.jpg.half.jpg",
"037/undistort/images/frame_0000231.jpg.half.jpg",
"037/undistort/images/frame_0000232.jpg.half.jpg",
"037/undistort/images/frame_0000233.jpg.half.jpg",
"037/undistort/images/frame_0000234.jpg.half.jpg",
"037/undistort/images/frame_0000235.jpg.half.jpg",
"037/undistort/images/frame_0000236.jpg.half.jpg",
"037/undistort/images/frame_0000237.jpg.half.jpg",
"037/undistort/images/frame_0000238.jpg.half.jpg",
"037/undistort/images/frame_0000239.jpg.half.jpg",
"037/undistort/images/frame_0000240.jpg.half.jpg",
"037/undistort/images/frame_0000241.jpg.half.jpg",
"037/undistort/images/frame_0000242.jpg.half.jpg",
"037/undistort/images/frame_0000243.jpg.half.jpg",
"037/undistort/images/frame_0000244.jpg.half.jpg",
"037/undistort/images/frame_0000245.jpg.half.jpg",
"037/undistort/images/frame_0000246.jpg.half.jpg",
"037/undistort/images/frame_0000247.jpg.half.jpg",
"037/undistort/images/frame_0000248.jpg.half.jpg",
"037/undistort/images/frame_0000249.jpg.half.jpg",
"037/undistort/images/frame_0000250.jpg.half.jpg",
"037/undistort/images/frame_0000252.jpg.half.jpg",
"037/undistort/images/frame_0000253.jpg.half.jpg",
"037/undistort/images/frame_0000254.jpg.half.jpg",
"037/undistort/images/frame_0000255.jpg.half.jpg",
"037/undistort/images/frame_0000257.jpg.half.jpg",
"037/undistort/images/frame_0000260.jpg.half.jpg",
"037/undistort/images/frame_0000261.jpg.half.jpg",
"037/undistort/images/frame_0000262.jpg.half.jpg",
"037/undistort/images/frame_0000263.jpg.half.jpg",
"037/undistort/images/frame_0000264.jpg.half.jpg",
"037/undistort/images/frame_0000265.jpg.half.jpg",
"037/undistort/images/frame_0000266.jpg.half.jpg",
"037/undistort/images/frame_0000267.jpg.half.jpg",
"037/undistort/images/frame_0000268.jpg.half.jpg",
"037/undistort/images/frame_0000269.jpg.half.jpg",
"037/undistort/images/frame_0000270.jpg.half.jpg",
"037/undistort/images/frame_0000271.jpg.half.jpg",
"037/undistort/images/frame_0000272.jpg.half.jpg",
"037/undistort/images/frame_0000273.jpg.half.jpg",
"037/undistort/images/frame_0000274.jpg.half.jpg",
"037/undistort/images/frame_0000275.jpg.half.jpg",
"037/undistort/images/frame_0000276.jpg.half.jpg",
"037/undistort/images/frame_0000277.jpg.half.jpg",
"037/undistort/images/frame_0000278.jpg.half.jpg",
"037/undistort/images/frame_0000279.jpg.half.jpg",
"037/undistort/images/frame_0000280.jpg.half.jpg",
"037/undistort/images/frame_0000281.jpg.half.jpg",
"037/undistort/images/frame_0000282.jpg.half.jpg",
"037/undistort/images/frame_0000283.jpg.half.jpg",
"037/undistort/images/frame_0000284.jpg.half.jpg",
"037/undistort/images/frame_0000285.jpg.half.jpg",
"037/undistort/images/frame_0000286.jpg.half.jpg",
"037/undistort/images/frame_0000287.jpg.half.jpg",
"037/undistort/images/frame_0000288.jpg.half.jpg",
"037/undistort/images/frame_0000289.jpg.half.jpg",
"037/undistort/images/frame_0000290.jpg.half.jpg",
"037/undistort/images/frame_0000291.jpg.half.jpg",
"037/undistort/images/frame_0000292.jpg.half.jpg",
"037/undistort/images/frame_0000293.jpg.half.jpg",
"037/undistort/images/frame_0000294.jpg.half.jpg",
"037/undistort/images/frame_0000295.jpg.half.jpg",
"037/undistort/images/frame_0000296.jpg.half.jpg",
"037/undistort/images/frame_0000297.jpg.half.jpg",
"037/undistort/images/frame_0000298.jpg.half.jpg",
"037/undistort/images/frame_0000299.jpg.half.jpg",
"037/undistort/images/frame_0000300.jpg.half.jpg",
"037/undistort/images/frame_0000301.jpg.half.jpg",
"037/undistort/images/frame_0000302.jpg.half.jpg",
"037/undistort/images/frame_0000303.jpg.half.jpg",
"037/undistort/images/frame_0000304.jpg.half.jpg",
"037/undistort/images/frame_0000305.jpg.half.jpg",
"037/undistort/images/frame_0000306.jpg.half.jpg",
"037/undistort/images/frame_0000307.jpg.half.jpg",
"037/undistort/images/frame_0000308.jpg.half.jpg",
"037/undistort/images/frame_0000309.jpg.half.jpg",
"037/undistort/images/frame_0000310.jpg.half.jpg",
"037/undistort/images/frame_0000311.jpg.half.jpg",
"037/undistort/images/frame_0000312.jpg.half.jpg",
"037/undistort/images/frame_0000313.jpg.half.jpg",
"037/undistort/images/frame_0000314.jpg.half.jpg",
"037/undistort/images/frame_0000315.jpg.half.jpg",
"037/undistort/images/frame_0000316.jpg.half.jpg",
"037/undistort/images/frame_0000317.jpg.half.jpg",
"037/undistort/images/frame_0000318.jpg.half.jpg",
"037/undistort/images/frame_0000319.jpg.half.jpg",
"037/undistort/images/frame_0000320.jpg.half.jpg",
"037/undistort/images/frame_0000321.jpg.half.jpg",
"037/undistort/images/frame_0000322.jpg.half.jpg",
"037/undistort/images/frame_0000323.jpg.half.jpg",
"037/undistort/images/frame_0000324.jpg.half.jpg",
"037/undistort/images/frame_0000325.jpg.half.jpg",
"037/undistort/images/frame_0000326.jpg.half.jpg",
"037/undistort/images/frame_0000327.jpg.half.jpg",
"037/undistort/images/frame_0000328.jpg.half.jpg",
"037/undistort/images/frame_0000329.jpg.half.jpg",
"037/undistort/images/frame_0000330.jpg.half.jpg",
"037/undistort/images/frame_0000331.jpg.half.jpg",
"037/undistort/images/frame_0000332.jpg.half.jpg",
"037/undistort/images/frame_0000333.jpg.half.jpg",
"037/undistort/images/frame_0000334.jpg.half.jpg",
"037/undistort/images/frame_0000335.jpg.half.jpg",
"037/undistort/images/frame_0000336.jpg.half.jpg",
"037/undistort/images/frame_0000337.jpg.half.jpg",
"037/undistort/images/frame_0000338.jpg.half.jpg",
"037/undistort/images/frame_0000339.jpg.half.jpg",
"037/undistort/images/frame_0000340.jpg.half.jpg",
"037/undistort/images/frame_0000341.jpg.half.jpg",
"037/undistort/images/frame_0000342.jpg.half.jpg",
"037/undistort/images/frame_0000343.jpg.half.jpg",
"037/undistort/images/frame_0000344.jpg.half.jpg",
"037/undistort/images/frame_0000345.jpg.half.jpg",
"037/undistort/images/frame_0000346.jpg.half.jpg",
"037/undistort/images/frame_0000347.jpg.half.jpg",
"037/undistort/images/frame_0000348.jpg.half.jpg",
"037/undistort/images/frame_0000349.jpg.half.jpg",
"037/undistort/images/frame_0000350.jpg.half.jpg",
"037/undistort/images/frame_0000351.jpg.half.jpg",
"037/undistort/images/frame_0000352.jpg.half.jpg",
"037/undistort/images/frame_0000353.jpg.half.jpg",
"037/undistort/images/frame_0000354.jpg.half.jpg",
"037/undistort/images/frame_0000355.jpg.half.jpg",
"037/undistort/images/frame_0000356.jpg.half.jpg",
"037/undistort/images/frame_0000357.jpg.half.jpg",
"037/undistort/images/frame_0000358.jpg.half.jpg",
"037/undistort/images/frame_0000359.jpg.half.jpg",
"037/undistort/images/frame_0000360.jpg.half.jpg",
"037/undistort/images/frame_0000361.jpg.half.jpg",
"037/undistort/images/frame_0000362.jpg.half.jpg",
"037/undistort/images/frame_0000363.jpg.half.jpg",
"037/undistort/images/frame_0000364.jpg.half.jpg",
"037/undistort/images/frame_0000365.jpg.half.jpg",
"037/undistort/images/frame_0000366.jpg.half.jpg",
"037/undistort/images/frame_0000367.jpg.half.jpg",
"037/undistort/images/frame_0000368.jpg.half.jpg",
"037/undistort/images/frame_0000369.jpg.half.jpg",
"037/undistort/images/frame_0000370.jpg.half.jpg",
"037/undistort/images/frame_0000371.jpg.half.jpg",
"037/undistort/images/frame_0000372.jpg.half.jpg",
"037/undistort/images/frame_0000373.jpg.half.jpg",
"037/undistort/images/frame_0000374.jpg.half.jpg",
"037/undistort/images/frame_0000375.jpg.half.jpg",
"037/undistort/images/frame_0000376.jpg.half.jpg",
"037/undistort/images/frame_0000377.jpg.half.jpg",
"037/undistort/images/frame_0000378.jpg.half.jpg",
"037/undistort/images/frame_0000379.jpg.half.jpg",
"037/undistort/images/frame_0000380.jpg.half.jpg",
"037/undistort/images/frame_0000381.jpg.half.jpg",
"037/undistort/images/frame_0000382.jpg.half.jpg",
"037/undistort/images/frame_0000383.jpg.half.jpg",
"037/undistort/images/frame_0000384.jpg.half.jpg",
"037/undistort/images/frame_0000385.jpg.half.jpg",
"037/undistort/images/frame_0000386.jpg.half.jpg",
"042/undistort/images/frame_0000001.jpg.half.jpg",
"042/undistort/images/frame_0000002.jpg.half.jpg",
"042/undistort/images/frame_0000003.jpg.half.jpg",
"042/undistort/images/frame_0000004.jpg.half.jpg",
"042/undistort/images/frame_0000005.jpg.half.jpg",
"042/undistort/images/frame_0000006.jpg.half.jpg",
"042/undistort/images/frame_0000008.jpg.half.jpg",
"042/undistort/images/frame_0000009.jpg.half.jpg",
"042/undistort/images/frame_0000010.jpg.half.jpg",
"042/undistort/images/frame_0000011.jpg.half.jpg",
"042/undistort/images/frame_0000013.jpg.half.jpg",
"042/undistort/images/frame_0000014.jpg.half.jpg",
"042/undistort/images/frame_0000015.jpg.half.jpg",
"042/undistort/images/frame_0000016.jpg.half.jpg",
"042/undistort/images/frame_0000017.jpg.half.jpg",
"042/undistort/images/frame_0000018.jpg.half.jpg",
"042/undistort/images/frame_0000019.jpg.half.jpg",
"042/undistort/images/frame_0000020.jpg.half.jpg",
"042/undistort/images/frame_0000021.jpg.half.jpg",
"042/undistort/images/frame_0000022.jpg.half.jpg",
"042/undistort/images/frame_0000023.jpg.half.jpg",
"042/undistort/images/frame_0000024.jpg.half.jpg",
"042/undistort/images/frame_0000025.jpg.half.jpg",
"042/undistort/images/frame_0000026.jpg.half.jpg",
"042/undistort/images/frame_0000027.jpg.half.jpg",
"042/undistort/images/frame_0000029.jpg.half.jpg",
"042/undistort/images/frame_0000031.jpg.half.jpg",
"042/undistort/images/frame_0000032.jpg.half.jpg",
"042/undistort/images/frame_0000033.jpg.half.jpg",
"042/undistort/images/frame_0000034.jpg.half.jpg",
"042/undistort/images/frame_0000035.jpg.half.jpg",
"042/undistort/images/frame_0000037.jpg.half.jpg",
"042/undistort/images/frame_0000040.jpg.half.jpg",
"042/undistort/images/frame_0000042.jpg.half.jpg",
"042/undistort/images/frame_0000043.jpg.half.jpg",
"042/undistort/images/frame_0000045.jpg.half.jpg",
"042/undistort/images/frame_0000046.jpg.half.jpg",
"042/undistort/images/frame_0000047.jpg.half.jpg",
"042/undistort/images/frame_0000048.jpg.half.jpg",
"042/undistort/images/frame_0000050.jpg.half.jpg",
"042/undistort/images/frame_0000051.jpg.half.jpg",
"042/undistort/images/frame_0000052.jpg.half.jpg",
"042/undistort/images/frame_0000053.jpg.half.jpg",
"042/undistort/images/frame_0000054.jpg.half.jpg",
"042/undistort/images/frame_0000056.jpg.half.jpg",
"042/undistort/images/frame_0000057.jpg.half.jpg",
"042/undistort/images/frame_0000058.jpg.half.jpg",
"042/undistort/images/frame_0000061.jpg.half.jpg",
"042/undistort/images/frame_0000126.jpg.half.jpg",
"042/undistort/images/frame_0000127.jpg.half.jpg",
"042/undistort/images/frame_0000129.jpg.half.jpg",
"042/undistort/images/frame_0000133.jpg.half.jpg",
"042/undistort/images/frame_0000134.jpg.half.jpg",
"042/undistort/images/frame_0000135.jpg.half.jpg",
"042/undistort/images/frame_0000136.jpg.half.jpg",
"042/undistort/images/frame_0000137.jpg.half.jpg",
"042/undistort/images/frame_0000138.jpg.half.jpg",
"042/undistort/images/frame_0000139.jpg.half.jpg",
"042/undistort/images/frame_0000140.jpg.half.jpg",
"042/undistort/images/frame_0000141.jpg.half.jpg",
"042/undistort/images/frame_0000143.jpg.half.jpg",
"042/undistort/images/frame_0000144.jpg.half.jpg",
"042/undistort/images/frame_0000146.jpg.half.jpg",
"042/undistort/images/frame_0000147.jpg.half.jpg",
"042/undistort/images/frame_0000148.jpg.half.jpg",
"042/undistort/images/frame_0000150.jpg.half.jpg",
"042/undistort/images/frame_0000151.jpg.half.jpg",
"042/undistort/images/frame_0000152.jpg.half.jpg",
"042/undistort/images/frame_0000153.jpg.half.jpg",
"042/undistort/images/frame_0000154.jpg.half.jpg",
"042/undistort/images/frame_0000158.jpg.half.jpg",
"042/undistort/images/frame_0000159.jpg.half.jpg",
"042/undistort/images/frame_0000161.jpg.half.jpg",
"042/undistort/images/frame_0000163.jpg.half.jpg",
"042/undistort/images/frame_0000164.jpg.half.jpg",
"042/undistort/images/frame_0000165.jpg.half.jpg",
"042/undistort/images/frame_0000166.jpg.half.jpg",
"042/undistort/images/frame_0000167.jpg.half.jpg",
"042/undistort/images/frame_0000168.jpg.half.jpg",
"042/undistort/images/frame_0000169.jpg.half.jpg",
"042/undistort/images/frame_0000170.jpg.half.jpg",
"042/undistort/images/frame_0000172.jpg.half.jpg",
"042/undistort/images/frame_0000173.jpg.half.jpg",
"042/undistort/images/frame_0000174.jpg.half.jpg",
"042/undistort/images/frame_0000175.jpg.half.jpg",
"042/undistort/images/frame_0000176.jpg.half.jpg",
"042/undistort/images/frame_0000177.jpg.half.jpg",
"042/undistort/images/frame_0000178.jpg.half.jpg",
"042/undistort/images/frame_0000179.jpg.half.jpg",
"042/undistort/images/frame_0000180.jpg.half.jpg",
"042/undistort/images/frame_0000181.jpg.half.jpg",
"042/undistort/images/frame_0000182.jpg.half.jpg",
"042/undistort/images/frame_0000183.jpg.half.jpg",
"042/undistort/images/frame_0000184.jpg.half.jpg",
"042/undistort/images/frame_0000185.jpg.half.jpg",
"042/undistort/images/frame_0000186.jpg.half.jpg",
"042/undistort/images/frame_0000187.jpg.half.jpg",
"042/undistort/images/frame_0000188.jpg.half.jpg",
"042/undistort/images/frame_0000189.jpg.half.jpg",
"042/undistort/images/frame_0000190.jpg.half.jpg",
"042/undistort/images/frame_0000191.jpg.half.jpg",
"042/undistort/images/frame_0000192.jpg.half.jpg",
"042/undistort/images/frame_0000193.jpg.half.jpg",
"042/undistort/images/frame_0000194.jpg.half.jpg",
"042/undistort/images/frame_0000195.jpg.half.jpg",
"042/undistort/images/frame_0000196.jpg.half.jpg",
"042/undistort/images/frame_0000197.jpg.half.jpg",
"042/undistort/images/frame_0000198.jpg.half.jpg",
"042/undistort/images/frame_0000199.jpg.half.jpg",
"042/undistort/images/frame_0000200.jpg.half.jpg",
"042/undistort/images/frame_0000201.jpg.half.jpg",
"042/undistort/images/frame_0000202.jpg.half.jpg",
"042/undistort/images/frame_0000203.jpg.half.jpg",
"042/undistort/images/frame_0000204.jpg.half.jpg",
"042/undistort/images/frame_0000205.jpg.half.jpg",
"042/undistort/images/frame_0000207.jpg.half.jpg",
"042/undistort/images/frame_0000208.jpg.half.jpg",
"042/undistort/images/frame_0000209.jpg.half.jpg",
"042/undistort/images/frame_0000210.jpg.half.jpg",
"042/undistort/images/frame_0000211.jpg.half.jpg",
"042/undistort/images/frame_0000214.jpg.half.jpg",
"042/undistort/images/frame_0000225.jpg.half.jpg",
"042/undistort/images/frame_0000231.jpg.half.jpg",
"042/undistort/images/frame_0000232.jpg.half.jpg",
"042/undistort/images/frame_0000233.jpg.half.jpg",
"042/undistort/images/frame_0000234.jpg.half.jpg",
"042/undistort/images/frame_0000235.jpg.half.jpg",
"042/undistort/images/frame_0000237.jpg.half.jpg",
"042/undistort/images/frame_0000238.jpg.half.jpg",
"042/undistort/images/frame_0000239.jpg.half.jpg",
"042/undistort/images/frame_0000241.jpg.half.jpg",
"042/undistort/images/frame_0000242.jpg.half.jpg",
"042/undistort/images/frame_0000243.jpg.half.jpg",
"042/undistort/images/frame_0000244.jpg.half.jpg",
"042/undistort/images/frame_0000245.jpg.half.jpg",
"042/undistort/images/frame_0000246.jpg.half.jpg",
"042/undistort/images/frame_0000247.jpg.half.jpg",
"042/undistort/images/frame_0000248.jpg.half.jpg",
"042/undistort/images/frame_0000251.jpg.half.jpg",
"042/undistort/images/frame_0000252.jpg.half.jpg",
"042/undistort/images/frame_0000253.jpg.half.jpg",
"042/undistort/images/frame_0000254.jpg.half.jpg",
"042/undistort/images/frame_0000255.jpg.half.jpg",
"042/undistort/images/frame_0000256.jpg.half.jpg",
"042/undistort/images/frame_0000257.jpg.half.jpg",]
| 47.385289
| 109
| 0.766234
| 12,429
| 81,171
| 4.869981
| 0.048516
| 0.353632
| 0.47151
| 0.082043
| 0.963356
| 0.957161
| 0.956203
| 0.01852
| 0.015811
| 0.015084
| 0
| 0.195234
| 0.055488
| 81,171
| 1,713
| 110
| 47.385289
| 0.594271
| 0.003055
| 0
| 0.039333
| 0
| 0.000596
| 0.859076
| 0.837029
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.000596
| 0
| 0.000596
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 7b1bbe2e5652c92d170b9306510210eb88505ed9
| 46
| py
| Python
| oj/custom_settings.py
| gogiluv/c2s
| 77a798f97c43405f6ce0cad7223b4e78cb01c953
| ["MIT"] | null | null | null
| oj/custom_settings.py
| gogiluv/c2s
| 77a798f97c43405f6ce0cad7223b4e78cb01c953
| ["MIT"] | null | null | null
| oj/custom_settings.py
| gogiluv/c2s
| 77a798f97c43405f6ce0cad7223b4e78cb01c953
| ["MIT"] | null | null | null
SECRET_KEY="af94e463ee18bc1309844259370defa0"
| 23
| 45
| 0.913043
| 3
| 46
| 13.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.466667
| 0.021739
| 46
| 1
| 46
| 46
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0.695652
| 0.695652
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 7b976659c33ddc4df4e2c8b163b504960b3c1301
| 117
| py
| Python
| summarization/summarizer/__init__.py
| Untesler/New-s
| bdc7f98e6abe783b3b304c351204a13432b3d287
| ["Apache-2.0"] | null | null | null
| summarization/summarizer/__init__.py
| Untesler/New-s
| bdc7f98e6abe783b3b304c351204a13432b3d287
| ["Apache-2.0"] | 4
| 2020-03-16T05:18:42.000Z
| 2021-12-13T20:40:36.000Z
| summarization/summarizer/__init__.py
| Untesler/New-s
| bdc7f98e6abe783b3b304c351204a13432b3d287
| ["Apache-2.0"] | 1
| 2020-05-26T16:01:58.000Z
| 2020-05-26T16:01:58.000Z
from summarization.summarizer.SentenceRank import SentenceRank
from summarization.summarizer.TextRank import TextRank
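Since this __init__ re-exports both classes, callers can import from the package root; a minimal sketch (the constructors' signatures are not shown in this file, so none are assumed):

from summarization.summarizer import SentenceRank, TextRank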
| 58.5
| 62
| 0.905983
| 12
| 117
| 8.833333
| 0.5
| 0.320755
| 0.509434
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059829
| 117
| 2
| 63
| 58.5
| 0.963636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 7
| c8d9abeccbed24a836506216e9feabd2ba435b50
| 55,071
| py
| Python
| octopus_deploy_swagger_client/octopus_deploy_client/subscription_api.py
| cvent/octopus-deploy-api-client
| 0e03e842e1beb29b132776aee077df570b88366a
| ["Apache-2.0"] | null | null | null
| octopus_deploy_swagger_client/octopus_deploy_client/subscription_api.py
| cvent/octopus-deploy-api-client
| 0e03e842e1beb29b132776aee077df570b88366a
| ["Apache-2.0"] | null | null | null
| octopus_deploy_swagger_client/octopus_deploy_client/subscription_api.py
| cvent/octopus-deploy-api-client
| 0e03e842e1beb29b132776aee077df570b88366a
| ["Apache-2.0"] | null | null | null
# coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from octopus_deploy_swagger_client.api_client import ApiClient
class SubscriptionApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_response_descriptor_subscriptions_subscription_subscription_resource(self, **kwargs): # noqa: E501
"""Create a SubscriptionResource # noqa: E501
Creates a new subscription # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_response_descriptor_subscriptions_subscription_subscription_resource(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SubscriptionResource subscription_resource: The SubscriptionResource resource to create
:return: SubscriptionResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.create_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(**kwargs) # noqa: E501
return data
def create_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(self, **kwargs): # noqa: E501
"""Create a SubscriptionResource # noqa: E501
Creates a new subscription # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param SubscriptionResource subscription_resource: The SubscriptionResource resource to create
:return: SubscriptionResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['subscription_resource'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_response_descriptor_subscriptions_subscription_subscription_resource" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'subscription_resource' in params:
body_params = params['subscription_resource']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/subscriptions', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SubscriptionResource', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self, base_space_id, **kwargs): # noqa: E501
"""Create a SubscriptionResource # noqa: E501
Creates a new subscription # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_response_descriptor_subscriptions_subscription_subscription_resource_spaces(base_space_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:param SubscriptionResource subscription_resource: The SubscriptionResource resource to create
:return: SubscriptionResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(base_space_id, **kwargs) # noqa: E501
else:
(data) = self.create_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(base_space_id, **kwargs) # noqa: E501
return data
def create_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(self, base_space_id, **kwargs): # noqa: E501
"""Create a SubscriptionResource # noqa: E501
Creates a new subscription # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(base_space_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:param SubscriptionResource subscription_resource: The SubscriptionResource resource to create
:return: SubscriptionResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['base_space_id', 'subscription_resource'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_response_descriptor_subscriptions_subscription_subscription_resource_spaces" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'base_space_id' is set
if ('base_space_id' not in params or
params['base_space_id'] is None):
raise ValueError("Missing the required parameter `base_space_id` when calling `create_response_descriptor_subscriptions_subscription_subscription_resource_spaces`") # noqa: E501
collection_formats = {}
path_params = {}
if 'base_space_id' in params:
path_params['baseSpaceId'] = params['base_space_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'subscription_resource' in params:
body_params = params['subscription_resource']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/{baseSpaceId}/subscriptions', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SubscriptionResource', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(self, id, **kwargs): # noqa: E501
"""Delete a SubscriptionResource by ID # noqa: E501
Deletes an existing subscription. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: ID of the SubscriptionResource to delete (required)
:return: TaskResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(id, **kwargs) # noqa: E501
return data
def delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(self, id, **kwargs): # noqa: E501
"""Delete a SubscriptionResource by ID # noqa: E501
Deletes an existing subscription. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: ID of the SubscriptionResource to delete (required)
:return: TaskResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/subscriptions/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TaskResource', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self, base_space_id, id, **kwargs): # noqa: E501
"""Delete a SubscriptionResource by ID # noqa: E501
Deletes an existing subscription. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces(base_space_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:param str id: ID of the SubscriptionResource to delete (required)
:return: TaskResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(base_space_id, id, **kwargs) # noqa: E501
else:
(data) = self.delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(base_space_id, id, **kwargs) # noqa: E501
return data
def delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(self, base_space_id, id, **kwargs): # noqa: E501
"""Delete a SubscriptionResource by ID # noqa: E501
Deletes an existing subscription. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(base_space_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:param str id: ID of the SubscriptionResource to delete (required)
:return: TaskResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['base_space_id', 'id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'base_space_id' is set
if ('base_space_id' not in params or
params['base_space_id'] is None):
raise ValueError("Missing the required parameter `base_space_id` when calling `delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces`") # noqa: E501
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces`") # noqa: E501
collection_formats = {}
path_params = {}
if 'base_space_id' in params:
path_params['baseSpaceId'] = params['base_space_id'] # noqa: E501
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/{baseSpaceId}/subscriptions/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TaskResource', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def index_response_descriptor_subscriptions_subscription_subscription_resource(self, **kwargs): # noqa: E501
"""Get a list of SubscriptionResources # noqa: E501
Lists all of the subscriptions in the supplied Octopus Deploy Space. The results will be sorted alphabetically by name. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.index_response_descriptor_subscriptions_subscription_subscription_resource(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int skip: Number of items to skip
:param int take: Number of items to take
:return: ResourceCollectionSubscriptionResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.index_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.index_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(**kwargs) # noqa: E501
return data
def index_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(self, **kwargs): # noqa: E501
"""Get a list of SubscriptionResources # noqa: E501
Lists all of the subscriptions in the supplied Octopus Deploy Space. The results will be sorted alphabetically by name. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.index_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int skip: Number of items to skip
:param int take: Number of items to take
:return: ResourceCollectionSubscriptionResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['skip', 'take'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method index_response_descriptor_subscriptions_subscription_subscription_resource" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'skip' in params:
query_params.append(('skip', params['skip'])) # noqa: E501
if 'take' in params:
query_params.append(('take', params['take'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/subscriptions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourceCollectionSubscriptionResource', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self, base_space_id, **kwargs): # noqa: E501
"""Get a list of SubscriptionResources # noqa: E501
Lists all of the subscriptions in the supplied Octopus Deploy Space. The results will be sorted alphabetically by name. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.index_response_descriptor_subscriptions_subscription_subscription_resource_spaces(base_space_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:param int skip: Number of items to skip
:param int take: Number of items to take
:return: ResourceCollectionSubscriptionResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.index_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(base_space_id, **kwargs) # noqa: E501
else:
(data) = self.index_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(base_space_id, **kwargs) # noqa: E501
return data
def index_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(self, base_space_id, **kwargs): # noqa: E501
"""Get a list of SubscriptionResources # noqa: E501
Lists all of the subscriptions in the supplied Octopus Deploy Space. The results will be sorted alphabetically by name. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.index_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(base_space_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:param int skip: Number of items to skip
:param int take: Number of items to take
:return: ResourceCollectionSubscriptionResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['base_space_id', 'skip', 'take'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method index_response_descriptor_subscriptions_subscription_subscription_resource_spaces" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'base_space_id' is set
if ('base_space_id' not in params or
params['base_space_id'] is None):
raise ValueError("Missing the required parameter `base_space_id` when calling `index_response_descriptor_subscriptions_subscription_subscription_resource_spaces`") # noqa: E501
collection_formats = {}
path_params = {}
if 'base_space_id' in params:
path_params['baseSpaceId'] = params['base_space_id'] # noqa: E501
query_params = []
if 'skip' in params:
query_params.append(('skip', params['skip'])) # noqa: E501
if 'take' in params:
query_params.append(('take', params['take'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/{baseSpaceId}/subscriptions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourceCollectionSubscriptionResource', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_all_response_descriptor_subscriptions_subscription_subscription_resource(self, **kwargs): # noqa: E501
"""Get a list of SubscriptionResources # noqa: E501
Lists all the subscriptions in the supplied Octopus Deploy Space. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_all_response_descriptor_subscriptions_subscription_subscription_resource(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[SubscriptionResource]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_all_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_all_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(**kwargs) # noqa: E501
return data
def list_all_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(self, **kwargs): # noqa: E501
"""Get a list of SubscriptionResources # noqa: E501
Lists all the subscriptions in the supplied Octopus Deploy Space. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_all_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[SubscriptionResource]
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_all_response_descriptor_subscriptions_subscription_subscription_resource" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/subscriptions/all', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[SubscriptionResource]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self, base_space_id, **kwargs): # noqa: E501
"""Get a list of SubscriptionResources # noqa: E501
Lists all the subscriptions in the supplied Octopus Deploy Space. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces(base_space_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:return: list[SubscriptionResource]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(base_space_id, **kwargs) # noqa: E501
else:
(data) = self.list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(base_space_id, **kwargs) # noqa: E501
return data
def list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(self, base_space_id, **kwargs): # noqa: E501
"""Get a list of SubscriptionResources # noqa: E501
Lists all the subscriptions in the supplied Octopus Deploy Space. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(base_space_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:return: list[SubscriptionResource]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['base_space_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'base_space_id' is set
if ('base_space_id' not in params or
params['base_space_id'] is None):
raise ValueError("Missing the required parameter `base_space_id` when calling `list_all_response_descriptor_subscriptions_subscription_subscription_resource_spaces`") # noqa: E501
collection_formats = {}
path_params = {}
if 'base_space_id' in params:
path_params['baseSpaceId'] = params['base_space_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/{baseSpaceId}/subscriptions/all', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[SubscriptionResource]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def load_response_descriptor_subscriptions_subscription_subscription_resource(self, id, **kwargs): # noqa: E501
"""Get a SubscriptionResource by ID # noqa: E501
Get a subscription # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.load_response_descriptor_subscriptions_subscription_subscription_resource(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: ID of the SubscriptionResource to load (required)
:return: SubscriptionResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.load_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.load_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(id, **kwargs) # noqa: E501
return data
def load_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(self, id, **kwargs): # noqa: E501
"""Get a SubscriptionResource by ID # noqa: E501
Get a subscription # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.load_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: ID of the SubscriptionResource to load (required)
:return: SubscriptionResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method load_response_descriptor_subscriptions_subscription_subscription_resource" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `load_response_descriptor_subscriptions_subscription_subscription_resource`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/subscriptions/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SubscriptionResource', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def load_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self, base_space_id, id, **kwargs): # noqa: E501
"""Get a SubscriptionResource by ID # noqa: E501
Get a subscription # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.load_response_descriptor_subscriptions_subscription_subscription_resource_spaces(base_space_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:param str id: ID of the SubscriptionResource to load (required)
:return: SubscriptionResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.load_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(base_space_id, id, **kwargs) # noqa: E501
else:
(data) = self.load_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(base_space_id, id, **kwargs) # noqa: E501
return data
def load_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(self, base_space_id, id, **kwargs): # noqa: E501
"""Get a SubscriptionResource by ID # noqa: E501
Get a subscription # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.load_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(base_space_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:param str id: ID of the SubscriptionResource to load (required)
:return: SubscriptionResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['base_space_id', 'id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method load_response_descriptor_subscriptions_subscription_subscription_resource_spaces" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'base_space_id' is set
if ('base_space_id' not in params or
params['base_space_id'] is None):
raise ValueError("Missing the required parameter `base_space_id` when calling `load_response_descriptor_subscriptions_subscription_subscription_resource_spaces`") # noqa: E501
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `load_response_descriptor_subscriptions_subscription_subscription_resource_spaces`") # noqa: E501
collection_formats = {}
path_params = {}
if 'base_space_id' in params:
path_params['baseSpaceId'] = params['base_space_id'] # noqa: E501
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/{baseSpaceId}/subscriptions/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SubscriptionResource', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def modify_response_descriptor_subscriptions_subscription_subscription_resource(self, id, **kwargs): # noqa: E501
"""Modify a SubscriptionResource by ID # noqa: E501
Updates an existing subscription # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_response_descriptor_subscriptions_subscription_subscription_resource(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: ID of the SubscriptionResource to modify (required)
:param SubscriptionResource subscription_resource: The SubscriptionResource resource to create
:return: SubscriptionResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.modify_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.modify_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(id, **kwargs) # noqa: E501
return data
def modify_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(self, id, **kwargs): # noqa: E501
"""Modify a SubscriptionResource by ID # noqa: E501
Updates an existing subscription # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_response_descriptor_subscriptions_subscription_subscription_resource_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: ID of the SubscriptionResource to modify (required)
:param SubscriptionResource subscription_resource: The SubscriptionResource resource to create
:return: SubscriptionResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'subscription_resource'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method modify_response_descriptor_subscriptions_subscription_subscription_resource" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `modify_response_descriptor_subscriptions_subscription_subscription_resource`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'subscription_resource' in params:
body_params = params['subscription_resource']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/subscriptions/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SubscriptionResource', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self, base_space_id, id, **kwargs): # noqa: E501
"""Modify a SubscriptionResource by ID # noqa: E501
Updates an existing subscription # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces(base_space_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:param str id: ID of the SubscriptionResource to modify (required)
:param SubscriptionResource subscription_resource: The SubscriptionResource resource to create
:return: SubscriptionResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(base_space_id, id, **kwargs) # noqa: E501
else:
(data) = self.modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(base_space_id, id, **kwargs) # noqa: E501
return data
def modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(self, base_space_id, id, **kwargs): # noqa: E501
"""Modify a SubscriptionResource by ID # noqa: E501
Updates an existing subscription # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces_with_http_info(base_space_id, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str base_space_id: ID of the space (required)
:param str id: ID of the SubscriptionResource to modify (required)
:param SubscriptionResource subscription_resource: The SubscriptionResource resource to create
:return: SubscriptionResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['base_space_id', 'id', 'subscription_resource'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'base_space_id' is set
if ('base_space_id' not in params or
params['base_space_id'] is None):
raise ValueError("Missing the required parameter `base_space_id` when calling `modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces`") # noqa: E501
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `modify_response_descriptor_subscriptions_subscription_subscription_resource_spaces`") # noqa: E501
collection_formats = {}
path_params = {}
if 'base_space_id' in params:
path_params['baseSpaceId'] = params['base_space_id'] # noqa: E501
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'subscription_resource' in params:
body_params = params['subscription_resource']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'APIKeyQuery', 'NugetApiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/api/{baseSpaceId}/subscriptions/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SubscriptionResource', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
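The docstrings in this generated client already spell out the calling convention; a minimal usage sketch, assuming server address and API-key setup happen on the ApiClient (those configuration details are omitted here and follow the generator's usual conventions):

from octopus_deploy_swagger_client.api_client import ApiClient
from octopus_deploy_swagger_client.octopus_deploy_client.subscription_api import SubscriptionApi

api = SubscriptionApi(api_client=ApiClient())

# Synchronous call, the default behaviour described in every docstring above:
subscriptions = api.list_all_response_descriptor_subscriptions_subscription_subscription_resource()

# Asynchronous variant, exactly as the docstrings show:
thread = api.list_all_response_descriptor_subscriptions_subscription_subscription_resource(async_req=True)
result = thread.get()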
| 45.626346
| 204
| 0.660929
| 6,104
| 55,071
| 5.6481
| 0.031619
| 0.040144
| 0.086321
| 0.119735
| 0.984975
| 0.984975
| 0.984975
| 0.981668
| 0.981088
| 0.980711
| 0
| 0.013851
| 0.261935
| 55,071
| 1,206
| 205
| 45.664179
| 0.83435
| 0.333061
| 0
| 0.825886
| 1
| 0
| 0.220172
| 0.101532
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038521
| false
| 0
| 0.006163
| 0
| 0.101695
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| a81e4fc644f4578131603615065835a6bd7585bb
| 2,446
| py
| Python
| warp/transpiler/Operations/Comparisons.py
| swapnilraj/warp
| 2fb1fa105fc5c46b2e53790fb0a2f7165b4133a1
| ["Apache-2.0"] | null | null | null
| warp/transpiler/Operations/Comparisons.py
| swapnilraj/warp
| 2fb1fa105fc5c46b2e53790fb0a2f7165b4133a1
| ["Apache-2.0"] | null | null | null
| warp/transpiler/Operations/Comparisons.py
| swapnilraj/warp
| 2fb1fa105fc5c46b2e53790fb0a2f7165b4133a1
| ["Apache-2.0"] | null | null | null
from transpiler.Operations.Binary import Binary
from transpiler.Operations.Unary import Unary
from transpiler.utils import uint256_to_int256


class IsZero(Unary):
    def evaluate_eagerly(self, x):
        return x == 0

    def generate_cairo_code(self, op, res):
        return [
            f"let (local {res} : Uint256) = is_zero{{range_check_ptr=range_check_ptr}}({op})"
        ]

    @classmethod
    def required_imports(cls):
        return {"evm.uint256": {"is_zero"}}


class Eq(Binary):
    # fixed: evaluate_eagerly was decorated @classmethod while taking `self`,
    # unlike every other operation in this module
    def evaluate_eagerly(self, x, y):
        return x == y

    def generate_cairo_code(self, op1, op2, res):
        return [
            f"let (local {res} : Uint256) = is_eq{{range_check_ptr=range_check_ptr}}({op1}, {op2})"
        ]

    @classmethod
    def required_imports(cls):
        return {"evm.uint256": {"is_eq"}}


class Lt(Binary):
    def evaluate_eagerly(self, x, y):
        return x < y

    def generate_cairo_code(self, op1, op2, res):
        return [
            "local memory_dict : DictAccess* = memory_dict",
            f"let (local {res} : Uint256) = is_lt{{range_check_ptr=range_check_ptr}}({op1}, {op2})",
        ]

    @classmethod
    def required_imports(cls):
        return {"evm.uint256": {"is_lt"}}


class Gt(Binary):
    def evaluate_eagerly(self, x, y):
        return x > y

    def generate_cairo_code(self, op1, op2, res):
        return [
            f"let (local {res} : Uint256) = is_gt{{range_check_ptr=range_check_ptr}}({op1}, {op2})"
        ]

    @classmethod
    def required_imports(cls):
        return {"evm.uint256": {"is_gt"}}


def slt(a, b):
    return uint256_to_int256(a) < uint256_to_int256(b)


class Slt(Binary):
    def evaluate_eagerly(self, x, y):
        return slt(x, y)

    def generate_cairo_code(self, op1, op2, res):
        return [
            f"let (local {res} : Uint256) = slt{{range_check_ptr=range_check_ptr}}({op1}, {op2})"
        ]

    @classmethod
    def required_imports(cls):
        return {"evm.uint256": {"slt"}}


def sgt(a, b):
    return uint256_to_int256(a) > uint256_to_int256(b)


class Sgt(Binary):
    def evaluate_eagerly(self, x, y):
        return sgt(x, y)

    def generate_cairo_code(self, op1, op2, res):
        return [
            f"let (local {res} : Uint256) = sgt{{range_check_ptr=range_check_ptr}}({op1}, {op2})"
        ]

    @classmethod
    def required_imports(cls):
        return {"evm.uint256": {"sgt"}}
| 24.959184 | 100 | 0.603434 | 323 | 2,446 | 4.359133 | 0.154799 | 0.085227 | 0.110795 | 0.09375 | 0.801847 | 0.768466 | 0.735085 | 0.735085 | 0.662642 | 0.627131 | 0 | 0.04796 | 0.258381 | 2,446 | 97 | 101 | 25.216495 | 0.728225 | 0 | 0 | 0.42029 | 0 | 0.072464 | 0.25879 | 0.114064 | 0 | 0 | 0 | 0 | 0 | 1 | 0.289855 | false | 0 | 0.130435 | 0.289855 | 0.797101 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 8 |
a9a26433bded0bf41128be06127c6ffedbba3bfc | 160,697 | py | Python | DepressUtil.py | yecfly/DEPRESSIONEST | 21b72906aac9f310e264f7a5eea348480a647197 | ["Unlicense"] | null | null | null | DepressUtil.py | yecfly/DEPRESSIONEST | 21b72906aac9f310e264f7a5eea348480a647197 | ["Unlicense"] | null | null | null | DepressUtil.py | yecfly/DEPRESSIONEST | 21b72906aac9f310e264f7a5eea348480a647197 | ["Unlicense"] | null | null | null |
# Here start the depression estimation processes
## From 2017.09.26, the stop criteria of training is changing to
#### Patched on 20180915: fixed a bug (implementation logic error for tlabels) in Valid_on_TestSet_3NI
##
import numpy as np
import tensorflow as tf
import os, pickle, time, sys, traceback, collections
import DataSetPrepare
import tflearn
from DataSetPrepare import Dataset_Dictionary
import win_unicode_console
win_unicode_console.enable()
continue_test=True # repeat the tests continuously for M7
OverTimes=20
M3N4S1={'eye_conv1_1_3x3/W:0':0,'eye_conv1_1_3x3/b:0':0,
'eye_conv1_2_3x3/W:0':0,'eye_conv1_2_3x3/b:0':0,
'eye_conv2_1_3x3/W:0':0,'eye_conv2_1_3x3/b:0':0,
'eye_conv2_2_3x3/W:0':0,'eye_conv2_2_3x3/b:0':0,
'eye_fc2/W:0':0, 'eye_fc2/b:0':0,
'eye_conv3_1_3x3/W:0':0,'eye_conv3_1_3x3/b:0':0,
'eye_conv3_2_3x3/W:0':0,'eye_conv3_2_3x3/b:0':0,
'eye_fc1/W:0':0,'eye_fc1/b:0':0}
M3N4S2={'middle_conv1_1_3x3/W:0':0,'middle_conv1_1_3x3/b:0':0,
'middle_conv1_2_3x3/W:0':0,'middle_conv1_2_3x3/b:0':0,
'middle_conv2_1_3x3/W:0':0,'middle_conv2_1_3x3/b:0':0,
'middle_conv2_2_3x3/W:0':0,'middle_conv2_2_3x3/b:0':0,
'middle_conv3_1_3x3/W:0':0,'middle_conv3_1_3x3/b:0':0,
'middle_conv3_2_3x3/W:0':0,'middle_conv3_2_3x3/b:0':0,
'middle_fc1/W:0':0,'middle_fc1/b:0':0}
M3N4S3={'mouth_conv1_1_3x3/W:0':0,'mouth_conv1_1_3x3/b:0':0,
'mouth_conv1_2_3x3/W:0':0,'mouth_conv1_2_3x3/b:0':0,
'mouth_conv2_1_3x3/W:0':0,'mouth_conv2_1_3x3/b:0':0,
'mouth_conv2_2_3x3/W:0':0,'mouth_conv2_2_3x3/b:0':0,
'mouth_conv3_1_3x3/W:0':0,'mouth_conv3_1_3x3/b:0':0,
'mouth_conv3_2_3x3/W:0':0,'mouth_conv3_2_3x3/b:0':0,
'mouth_fc1/W:0':0,'mouth_fc1/b:0':0}
M3N5S1={'eye_conv1_1_3x3/W:0':0,'eye_conv1_1_3x3/b:0':0,
'eye_conv1_2_3x3/W:0':0,'eye_conv1_2_3x3/b:0':0,
'eye_conv2_1_3x3/W:0':0,'eye_conv2_1_3x3/b:0':0,
'eye_conv2_2_3x3/W:0':0,'eye_conv2_2_3x3/b:0':0,
'eye_conv3_1_3x3/W:0':0,'eye_conv3_1_3x3/b:0':0,
'eye_conv3_2_3x3/W:0':0,'eye_conv3_2_3x3/b:0':0,
'eye_fc1/W:0':0,'eye_fc1/b:0':0}
M3N5S2={'middle_conv1_1_3x3/W:0':0,'middle_conv1_1_3x3/b:0':0,
'middle_conv1_2_3x3/W:0':0,'middle_conv1_2_3x3/b:0':0,
'middle_conv2_1_3x3/W:0':0,'middle_conv2_1_3x3/b:0':0,
'middle_conv2_2_3x3/W:0':0,'middle_conv2_2_3x3/b:0':0,
'middle_fc2/W:0':0, 'middle_fc2/b:0':0,
'middle_conv3_1_3x3/W:0':0,'middle_conv3_1_3x3/b:0':0,
'middle_conv3_2_3x3/W:0':0,'middle_conv3_2_3x3/b:0':0,
'middle_fc1/W:0':0,'middle_fc1/b:0':0}
M3N5S3={'mouth_conv1_1_3x3/W:0':0,'mouth_conv1_1_3x3/b:0':0,
'mouth_conv1_2_3x3/W:0':0,'mouth_conv1_2_3x3/b:0':0,
'mouth_conv2_1_3x3/W:0':0,'mouth_conv2_1_3x3/b:0':0,
'mouth_conv2_2_3x3/W:0':0,'mouth_conv2_2_3x3/b:0':0,
'mouth_fc2/W:0':0, 'mouth_fc2/b:0':0,
'mouth_conv3_1_3x3/W:0':0,'mouth_conv3_1_3x3/b:0':0,
'mouth_conv3_2_3x3/W:0':0,'mouth_conv3_2_3x3/b:0':0,
'mouth_fc1/W:0':0,'mouth_fc1/b:0':0}
lr_drate=0.8
batchsize_step=0
times=20 # the learning rate decays once every `times` epochs
test_bat=200
TestNumLimit = 200
Mini_Epochs = 140
show_threshold = 1.62
class SIMSTS():
    def __init__(self, NC):
        self.min = 1.0
        self.max = 0.0
        self.amout = 0
        self.mean = 0
        self.count = NC

    def addFigure(self, figure):
        if self.min > figure:
            self.min = figure
        if self.max < figure:
            self.max = figure
        self.amout = self.amout + figure

    def getSTS(self):
        self.mean = self.amout / self.count
        return self.mean, self.max, self.min

    def logfile(self, Module, Dataset, Network, NE, MSS, MSL):
        filename = './logs/M%dtests/D%d_N%d.txt' % (Module, Dataset, Network)
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        filein = open(filename, 'a')
        filein.write('MEAN:%.6f\tMAX:%.6f\tMIN:%.6f\tnum_estimators:%d\tmin_samples_split:%d\tmin_samples_leaf:%d\tD%d\tN%d\n' % (self.mean,
                     self.max, self.min, NE, MSS, MSL, Dataset, Network))
        filein.close()


def initialize_dirs():
    if not os.path.exists('./logs/VL'):
        os.makedirs('./logs/VL')
    if not os.path.exists('./saves'):
        os.makedirs('./saves')
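

def _simsts_example():
    # Usage sketch (illustrative, never called): accumulate one figure per test
    # run, then read (mean, max, min); assumes exactly NC calls to addFigure.
    sst = SIMSTS(3)
    for v in (0.60, 0.55, 0.65):
        sst.addFigure(v)
    mean, mx, mn = sst.getSTS()
    assert (round(mean, 10), mx, mn) == (0.6, 0.65, 0.55)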
class LOSS_ANA:
    '''Collects the training/validation losses and analyzes them.
    The initial length should be divisible by 50 with no remainder.'''

    def __init__(self):
        self.__Validation_Loss_List = []
        self.__Current_Length = 0  # number of validation losses recorded so far
        self.__Min_Loss = 10000.0
        self.__Min_Loss_Second = 10001.0

    @property
    def minimun_loss(self):
        return self.__Min_Loss

    @property
    def second_minimun_loss(self):
        return self.__Min_Loss_Second

    @property
    def loss_length(self):
        return self.__Current_Length

    def setMinimun_loss(self, m):
        self.__Min_Loss = m

    def analyzeLossVariation(self, loss):
        '''Record the current validation loss and update the running minimum.
        Inputs:
            loss: float, the current loss on the validation set
        Outputs:
            bool: True if the input is smaller than every loss seen before it
        '''
        self.__Current_Length = self.__Current_Length + 1
        flag = False
        if loss < self.__Min_Loss:
            self.__Min_Loss_Second = self.__Min_Loss
            self.__Min_Loss = loss
            flag = True
        self.__Validation_Loss_List.append(loss)
        return flag

    def outputlosslist(self, logfilename):
        '''Write all validation losses of the current training run to the given file.'''
        fw = open(logfilename, 'w')
        for v in self.__Validation_Loss_List:
            fw.write('%.16f\n' % (v))
        fw.close()
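

def _loss_ana_example():
    # Behavior sketch (illustrative, never called): analyzeLossVariation returns
    # True only when a new minimum loss is observed, which is what gates model
    # checkpointing in run() below.
    la = LOSS_ANA()
    assert la.analyzeLossVariation(1.5) is True   # first loss is a new minimum
    assert la.analyzeLossVariation(1.7) is False  # not a new minimum
    assert la.analyzeLossVariation(1.4) is True   # new minimum again
    assert la.minimun_loss == 1.4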
def calR(predict_labels_in, groundtruth_labels_in, cn=7):
    # accept either dense class indices or one-hot rows for both inputs
    if len(np.asarray(predict_labels_in).shape) == 1:
        predict_labels = DataSetPrepare.dense_to_one_hot(predict_labels_in, cn)
    else:
        predict_labels = predict_labels_in
    if len(np.asarray(groundtruth_labels_in).shape) == 1:
        groundtruth_labels = DataSetPrepare.dense_to_one_hot(groundtruth_labels_in, cn)
    else:
        groundtruth_labels = groundtruth_labels_in
    assert len(predict_labels) == len(groundtruth_labels), ('predict_labels length: %d groundtruth_labels length: %d' % (len(predict_labels), len(groundtruth_labels)))
    nc = len(groundtruth_labels)
    g_c = np.zeros([cn])
    confusion_mat = list(np.zeros([cn, cn]))
    for i in range(nc):
        cmi = list(groundtruth_labels[i]).index(max(groundtruth_labels[i]))
        g_c[cmi] = g_c[cmi] + 1
        pri = list(predict_labels[i]).index(max(predict_labels[i]))
        confusion_mat[cmi][pri] = confusion_mat[cmi][pri] + 1
    for i in range(len(g_c)):
        if g_c[i] > 0:
            # normalize each row by the number of ground-truth samples of that class
            confusion_mat[i] = list(np.asarray(confusion_mat[i]) / g_c[i])
    return confusion_mat
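

def _calR_example():
    # Worked example (illustrative, never called). With dense labels and cn=2,
    # calR one-hot encodes both inputs via DataSetPrepare.dense_to_one_hot
    # (assumed to map class indices to one-hot rows) and returns a
    # row-normalized confusion matrix.
    predictions = np.asarray([0, 1, 1, 0])
    ground_truth = np.asarray([0, 1, 0, 0])
    cm = calR(predictions, ground_truth, cn=2)
    # class 0 has 3 ground-truth samples: 2 predicted as 0, 1 as 1 -> [2/3, 1/3]
    # class 1 has 1 ground-truth sample, predicted as 1            -> [0, 1]
    assert np.allclose(cm, [[2 / 3, 1 / 3], [0.0, 1.0]])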
def overAllAccuracy(conf_m, afc=None):
    accuracy_for_every_category = []
    r = len(conf_m)
    if r > 0:
        c = len(conf_m[0])
    else:
        print('ERROR: Confusion Matrix is unexpected.')
        exit()
    assert r == c, ('ERROR: Confusion Matrix is unexpected for its unequal rows and cols: %d %d' % (r, c))
    ac = 0.0
    for i in range(r):
        ac = ac + conf_m[i][i]
        accuracy_for_every_category.append(conf_m[i][i])
    ac = ac / r
    if afc is not None:
        afc.extend(accuracy_for_every_category)  # extend mutates the caller's list; reassigning its None return was a bug
    del accuracy_for_every_category
    return ac
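

def _overAllAccuracy_example():
    # Example (illustrative, never called): overAllAccuracy averages the
    # confusion-matrix diagonal, i.e. it is the class-balanced accuracy.
    cm = [[0.9, 0.1],
          [0.4, 0.6]]
    per_class = []
    assert abs(overAllAccuracy(cm, afc=per_class) - 0.75) < 1e-9  # (0.9 + 0.6) / 2
    assert per_class == [0.9, 0.6]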
def Valid_on_TestSet(cn, sess, accuracy, sum_test, loss, softmax,
                     placeholder1, placeholder1_input,
                     placeholder_labels, placeholder_labels_input, afc=None):
    '''Evaluate the data with one network input in the given session.
    Inputs:
        sess, accuracy, sum_test, loss, softmax
    Outputs:
        v_accuracy, valid_loss, oaa, confu_mat'''
    ncount = len(placeholder_labels_input)
    tlabels = []
    if ncount > TestNumLimit:
        test_iter = np.floor_divide(ncount, test_bat)
        v_accuracy = 0
        valid_loss = 0
        for ite in range(test_iter):
            start = test_bat * ite
            end = test_bat * (ite + 1)
            st, v_loss, tlab = sess.run([sum_test, loss, softmax], feed_dict={placeholder1: placeholder1_input[start:end],
                                        placeholder_labels: placeholder_labels_input[start:end]})
            v_accuracy = v_accuracy + st
            valid_loss = valid_loss + v_loss
            tlabels.extend(tlab)
        if ncount % test_bat > 0:
            st, v_loss, tlab = sess.run([sum_test, loss, softmax], feed_dict={placeholder1: placeholder1_input[test_bat * test_iter:ncount],
                                        placeholder_labels: placeholder_labels_input[test_bat * test_iter:ncount]})
            v_accuracy = v_accuracy + st
            valid_loss = valid_loss + v_loss
        v_accuracy = v_accuracy / ncount
        valid_loss = valid_loss / (test_iter + 1)
        tlabels.extend(tlab)  # note: when ncount divides evenly this re-appends the last batch; fixed in Valid_on_TestSet_3NI
    else:
        v_accuracy, valid_loss, tlab = sess.run([accuracy, loss, softmax], feed_dict={placeholder1: placeholder1_input,
                                                placeholder_labels: placeholder_labels_input})
        tlabels.extend(tlab)
    confu_mat = calR(tlabels, placeholder_labels_input, cn)
    oaa = overAllAccuracy(confu_mat, afc=afc)
    return v_accuracy, valid_loss, oaa, confu_mat


def Valid_on_TestSet_3NI(cn, sess, accuracy, sum_test, loss, softmax,
                         placeholder1, placeholder1_input,
                         placeholder2, placeholder2_input,
                         placeholder3, placeholder3_input,
                         placeholder_labels, placeholder_labels_input, afc=None):
    '''Evaluate the data with three network inputs in the given session.
    Inputs:
        sess, accuracy, sum_test, loss, softmax
    Outputs:
        v_accuracy, valid_loss, oaa, confu_mat'''
    ncount = len(placeholder_labels_input)
    tlabels = []
    if ncount > TestNumLimit:
        test_iter = np.floor_divide(ncount, test_bat)
        v_accuracy = 0
        valid_loss = 0
        for ite in range(test_iter):
            start = test_bat * ite
            end = test_bat * (ite + 1)
            st, v_loss, tlab = sess.run([sum_test, loss, softmax], feed_dict={placeholder1: placeholder1_input[start:end],
                                        placeholder2: placeholder2_input[start:end],
                                        placeholder3: placeholder3_input[start:end],
                                        placeholder_labels: placeholder_labels_input[start:end]})
            v_accuracy = v_accuracy + st
            valid_loss = valid_loss + v_loss
            tlabels.extend(tlab)
        if ncount % test_bat > 0:
            st, v_loss, tlab = sess.run([sum_test, loss, softmax], feed_dict={placeholder1: placeholder1_input[test_bat * test_iter:ncount],
                                        placeholder2: placeholder2_input[test_bat * test_iter:ncount],
                                        placeholder3: placeholder3_input[test_bat * test_iter:ncount],
                                        placeholder_labels: placeholder_labels_input[test_bat * test_iter:ncount]})
            tlabels.extend(tlab)
            v_accuracy = v_accuracy + st
            valid_loss = valid_loss + v_loss
        v_accuracy = v_accuracy / ncount
        valid_loss = valid_loss / (test_iter + 1)
    else:
        v_accuracy, valid_loss, tlab = sess.run([accuracy, loss, softmax], feed_dict={placeholder1: placeholder1_input,
                                                placeholder2: placeholder2_input,
                                                placeholder3: placeholder3_input,
                                                placeholder_labels: placeholder_labels_input})
        tlabels.extend(tlab)
    confu_mat = calR(tlabels, placeholder_labels_input, cn)
    oaa = overAllAccuracy(confu_mat, afc=afc)
    return v_accuracy, valid_loss, oaa, confu_mat
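

def _batch_split_example():
    # Batching sketch (illustrative, never called): with ncount=450 and the
    # module-level test_bat=200, the evaluators above run two full batches plus
    # a 50-sample remainder; accuracy is normalized by ncount and the summed
    # loss by the batch count, test_iter + 1 = 3.
    ncount = 450
    test_iter = np.floor_divide(ncount, test_bat)
    assert test_iter == 2 and ncount % test_bat == 50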
def logfile(file_record, runs, OAA, afc, valid_loss, valid_min_loss, final_train_loss, train_min_loss, TA, TC, ILR, FLR, LS, ites, Epo, cBS, iBS, input, CM, T, df):
    file_record = "Run%02d\tOverAllACC:%0.8f\tTestAccuracy:%.8f\tACs: %s\tFinalLoss:%.10f\tMinimumLoss:%.10f\tFinaltrainloss:%.10f\tMinimumtrainloss:%.10f\tTimeConsumed:%08.6f\tInitialLearningRate:%.8f\tFinalLearningRate:%.8f\tLearningStepForDroppingMagnitude:%08d\tTotalIterations:%08d\tEpochs:%08d\tcurrentBatchSize:%05d\tinitialBatchSize:%05d\tInput:%s\t%s\tTime:%s\tDataFile:%s" % (runs,
        OAA, TA, str(afc), valid_loss, valid_min_loss, final_train_loss, train_min_loss, TC, ILR, FLR, LS, ites, Epo, cBS, iBS, str(input), str(CM), time.strftime('%Y%m%d%H%M%S', T), df)
    return file_record


def logfileV2(file_record, runs, V_string, final_train_loss, train_min_loss, TC, ILR, FLR, LS, ites, Epo, cBS, iBS, input, CMstring, T, df):
    file_record = "Run%02d\t%s\tFinaltrainloss:%.10f\tMinimumtrainloss:%.10f\tTimeConsumed:%08.6f\tInitialLearningRate:%.8f\tFinalLearningRate:%.8f\tLearningStepForDroppingMagnitude:%08d\tTotalIterations:%08d\tEpochs:%08d\tcurrentBatchSize:%05d\tinitialBatchSize:%05d\tInput:%s\t%s\tTime:%s\tDataFile:%s" % (runs,
        V_string, final_train_loss, train_min_loss, TC, ILR, FLR, LS, ites, Epo, cBS, iBS, str(input), str(CMstring), time.strftime('%Y%m%d%H%M%S', T), df)
    return file_record


def logfileForSklearnModel(file_record, runs, model, TA, OAA, CM, df, train_ac, toaa, tcm):
    modelstring = ''
    for v in str(model).splitlines():
        modelstring = modelstring + v
    file_record = 'Run%02d\tOverAllACC:%.8f\tTestAccuracy:%.8f\tTrainOAA:%.8f\tTrainAC:%.8f\tinput:%s\tCM:%s\tTCM:%s\t%s\t%s' % (runs, OAA, TA, toaa, train_ac, (sys.argv), str(CM), str(tcm), df, modelstring)
    return file_record
def load(data_path, session, ignore_missing=False):
    '''Load network weights.
    data_path: path to the numpy-serialized network weights
    session: the current TensorFlow session
    ignore_missing: if True, serialized weights for missing layers are ignored
    '''
    # allow_pickle=True is required on NumPy >= 1.16.3 to load dict-valued .npy files
    data_dict = np.load(data_path, allow_pickle=True).item()
    for op_name in data_dict:
        with tf.variable_scope(op_name, reuse=True):
            for param_name, data in data_dict[op_name].items():
                try:
                    var = tf.get_variable(param_name)
                    session.run(var.assign(data))
                except ValueError:
                    if not ignore_missing:
                        raise
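# Usage sketch for load() (illustrative): the .npy weight files are expected to
# be dicts of dicts such as {'conv1_1': {'weights': ndarray, 'biases': ndarray}},
# which is why np.load(...).item() is used above. A hypothetical call:
#
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       load('./networkmodel/VGGFACE.npy', sess, ignore_missing=True)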
def restorefacepatchModel(TrainID, sess, NetworkType, graph):
vl=graph.get_collection(name='trainable_variables')
saver1=None
saver2=None
saver3=None
if NetworkType==4:
for v in vl:
if M3N4S1.get(v.name, -1)==0:
#print(M3N4S1[v.name])
M3N4S1[v.name]=v
#print(M3N4S1[v.name])
#exit(9)
elif M3N4S2.get(v.name, -1)==0:
M3N4S2[v.name]=v
elif M3N4S3.get(v.name, -1)==0:
M3N4S3[v.name]=v
saver1=tf.train.Saver(M3N4S1)
saver2=tf.train.Saver(M3N4S2)
saver3=tf.train.Saver(M3N4S3)
if TrainID%100>30:
saver1.restore(sess, './FPPTM/EyePatch_TrainonD502_TestonD531_N4_R4_20171025123948_1.59218006134_.ckpt')#OverAllACC:0.56836735 TestAccuracy:0.56836735 FinalLoss:1.5921800613
saver2.restore(sess, './FPPTM/MiddlePatch_TrainonD502_TestonD531_N4_R4_20171025113147_1.68774459362_.ckpt')#OverAllACC:0.46938776 TestAccuracy:0.46938776 FinalLoss:1.6877445936
saver3.restore(sess, './FPPTM/MouthPatch_TrainonD502_TestonD531_N4_R8_20171025144404_1.57691563368_.ckpt')#OverAllACC:0.58367347 TestAccuracy:0.58367347 FinalLoss:1.5769156337
elif TrainID%100<20:
saver1.restore(sess, './FPPTM/EyePatch_TrainonD532_TestonD501_N4_R9_20171019103910_1.51784744629_only_trainable_variables.ckpt')#OverAllACC:0.57857271 TestAccuracy:0.65412330 FinalLoss:1.5178474463
saver2.restore(sess, './FPPTM/MiddlePatch_TrainonD532_TestonD501_N4_R11_20171019080535_1.66863813767_only_trainable_variables.ckpt')#OverAllACC:0.44420250 TestAccuracy:0.49079263 FinalLoss:1.6686381377
saver3.restore(sess, './FPPTM/MouthPatch_TrainonD532_TestonD501_N4_R1_20171018224312_1.42820624205_only_trainable_variables.ckpt')#OverAllACC:0.68346248 TestAccuracy:0.74299440 FinalLoss:1.4282062420
else:
print('Unexpected case occurred when loading the pretrained model in restorefacepatchModel')
exit(-1)
elif NetworkType==5:# to distinguish the variants, the tflearn version of N3 is exposed as N5
for v in vl:
if M3N5S1.get(v.name, -1)==0:
M3N5S1[v.name]=v
elif M3N5S2.get(v.name, -1)==0:
M3N5S2[v.name]=v
elif M3N5S3.get(v.name, -1)==0:
M3N5S3[v.name]=v
saver1=tf.train.Saver(M3N5S1)
saver2=tf.train.Saver(M3N5S2)
saver3=tf.train.Saver(M3N5S3)
if TrainID%100>30:
saver1.restore(sess, './FPPTM/EyePatch_TrainonD502_TestonD531_N3_R10_20171102144530_1.5524974227_.ckpt')#Run10 OverAllACC:0.61836735 TestAccuracy:0.61836735 FinalLoss:1.5524974227
saver2.restore(sess, './FPPTM/MiddlePatch_TrainonD502_TestonD531_N3_R7_20171102190719_1.69338421822_.ckpt')#Run07 OverAllACC:0.46428571 TestAccuracy:0.46428571 FinalLoss:1.6933842182
saver3.restore(sess, './FPPTM/MouthPatch_TrainonD502_TestonD531_N3_R14_20171103033147_1.55810719728_.ckpt')#Run14 OverAllACC:0.60612245 TestAccuracy:0.60612245 FinalLoss:1.5581071973
elif TrainID%100<20:
saver1.restore(sess, './FPPTM/EyePatch_TrainonD532_TestonD501_N3_R0_20171102203504_1.5470389036_.ckpt')#Run00 OverAllACC:0.58779029 TestAccuracy:0.61569255 FinalLoss:1.5470389036
saver2.restore(sess, './FPPTM/MiddlePatch_TrainonD532_TestonD501_N3_R14_20171102201934_1.65476641288_.ckpt')#Run14 OverAllACC:0.46619803 TestAccuracy:0.51401121 FinalLoss:1.6547664129 MinimunLoss:1.6547664129
saver3.restore(sess, './FPPTM/MouthPatch_TrainonD532_TestonD501_N3_R9_20171102141218_1.41499766937_.ckpt')#Run09 OverAllACC:0.69564812 TestAccuracy:0.76220977 FinalLoss:1.4149976694
else:
print('Unexpected case occurred when loading the pretrained model in restorefacepatchModel')
exit(-1)
else:
exit(3)
def restorevggModel(sess, NetworkType, graph):
vl=graph.get_collection(name='trainable_variables')
if NetworkType==10 or NetworkType==11 or NetworkType==12:
data_dict=np.load('./networkmodel/VGGFACE.npy').item()
#print(type(data_dict))
#print(len(data_dict))
##print(data_dict)
#for name in data_dict:
# print(name)
for v in vl:
#print(v.name)
namescope=v.name.split('/')[0]
var=v.name.split('/')[1]
val=data_dict.get(namescope, None)
#print(v.name, namescope, var, var.find('W:0'), var.find('b:0'), type(val))
if val is None:
continue
elif var.find('W:0')>-1:
shape=val['weights'].shape
#print(shape)
if shape[2]==3:
val['weights']=np.reshape(val['weights'][:,:,1,:],[shape[0], shape[1], 1, shape[3]])
sess.run(v.assign(val['weights']))
print('Variable %s restored'%(v.name))
elif var.find('b:0')>-1:
#shape=val['biases'].shape
sess.run(v.assign(val['biases']))
print('Variable %s restored'%(v.name))
else:
continue
else:
exit(3)
def loadPretrainedModel(NetworkType, network, session, module):
#if NetworkType==4 or NetworkType==0 or NetworkType==1 or NetworkType==2 or NetworkType==3:
if module==1:
if NetworkType==4 or NetworkType<10:
try:
print("Loading pretrained network model: VGGFACE.npy......")
network.load('./networkmodel/VGGFACE.npy', session, ignore_missing=True)
print('\nPreserved Model of VGGFACE was loaded.\n')
except:
print('ERROR: unable to load pretrained network weights')
traceback.print_exc()
exit(-1)
else:
print('No pretrained network weights fit the current network type. Please try another network type.')
exit()
elif module==4:
if NetworkType==4 or NetworkType<9:
try:
print("Loading pretrained network model: VGGFACE.npy......")
network.load('./networkmodel/VGGFACE.npy', session, ignore_missing=True)
print('\nPreserved Model of VGGFACE was loaded.\n')
except:
print('ERROR: unable to load pretrained VGGFACE network weights')
traceback.print_exc()
exit(-1)
elif NetworkType==30:
try:
print("Loading pretrained network model: ResNet50.npy......")
network.load('./networkmodel/ResNet50.npy', session, ignore_missing=True)
print('\nPreserved Model of ResNet50 was loaded.\n')
except:
print('ERROR: unable to load pretrained ResNet50 network weights')
traceback.print_exc()
exit(-1)
elif NetworkType==33:
try:
print("Loading pretrained network model: AlexNetoxford102.npy......")
network.load('./networkmodel/AlexNetoxford102.npy', session, ignore_missing=True)
print('\nPreserved Model of AlexNetoxford102 was loaded.\n')
except:
print('ERROR: unable to load pretrained AlexNetoxford102 network weights')
traceback.print_exc()
exit(-1)
else:
print('No pretrained network weights fit the current network type. Please try another network type.')
exit()
else:
print('Module %d has no pretrained model embedded. Please try another module or check the input again.'%(module))
exit()
Datasets = collections.namedtuple('Datasets', ['train', 'test', 'validation'])
def groupdata(Apredata, ValidID, TestID):
'''This function will delete the contents in Apredata.
Please be careful when you use it.'''
nl=len(Apredata)
train={'X':[], 'Y':[]}
test={'X':[], 'Y':[]}
valid={'X':[], 'Y':[]}
for i in range(nl):
if i==int(TestID):
test['X'].extend(Apredata[i]['X'])
del Apredata[i]['X']
test['Y'].extend(Apredata[i]['Y'])
del Apredata[i]['Y']
if ValidID==TestID:
valid=test
elif i==int(ValidID):
valid['X'].extend(Apredata[i]['X'])
del Apredata[i]['X']
valid['Y'].extend(Apredata[i]['Y'])
del Apredata[i]['Y']
else:
train['X'].extend(Apredata[i]['X'])
del Apredata[i]['X']
train['Y'].extend(Apredata[i]['Y'])
del Apredata[i]['Y']
return Datasets(train=train, test=test, validation=valid)
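

def _groupdata_example():
    # Usage sketch (illustrative, never called): each element of Apredata is one
    # cross-validation fold {'X': [...], 'Y': [...]}; TestID picks the test
    # fold, ValidID the validation fold, and the rest are merged for training.
    # Note that groupdata empties Apredata in place.
    folds = [{'X': [i], 'Y': [i]} for i in range(5)]
    ds = groupdata(folds, ValidID=3, TestID=4)
    assert ds.train['X'] == [0, 1, 2]
    assert ds.validation['X'] == [3] and ds.test['X'] == [4]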
def multiprocessingUnitForModule8tests(metrics, sst, model_save_path, runs, t1, test_run,
NetworkType, data,facepatchpreprocessdatafilename, log,
n_estimators, min_samples_split, min_samples_leaf):
ct=time.time()
file_record=None # fix: `file_record` was read below before any assignment
cn=7 # assumption: `cn` was undefined in this function; 7 matches calR's default category count
m8_model_save_path=model_save_path.replace('_R'+str(runs)+time.strftime('_%Y%m%d%H%M%S',time.localtime(t1)),
'_R'+str(test_run)+time.strftime('_%Y%m%d%H%M%S',time.localtime(ct)))
logpostfix='_E%d_MSS%d_MSL%d_'%(n_estimators, min_samples_split, min_samples_leaf)
if NetworkType%10==0:
from sklearn import tree
optm = tree.DecisionTreeClassifier(criterion='entropy', min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf)
elif NetworkType%10==1:
from sklearn import tree
optm = tree.DecisionTreeClassifier(criterion='gini', min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf)
elif NetworkType%10==2:
from sklearn.ensemble import RandomForestClassifier
optm = RandomForestClassifier(n_estimators=n_estimators, criterion='entropy',
min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf)
elif NetworkType%10==3:
from sklearn.ensemble import RandomForestClassifier
optm = RandomForestClassifier(n_estimators=n_estimators, criterion='gini',
min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf)
else:
print('ERROR:::::$$$$: Unexpected NetworkType encountered.')
exit(-1)
m8_model_save_path=m8_model_save_path.replace('.ckpt', '_%s.ckpt'%(type(optm).__name__))
optm.fit(data.train['X'], data.train['Y'])
tY=optm.predict(data.train['X'])
train_acc=metrics.accuracy_score(np.asarray(data.train['Y']), tY)
tcm=calR(tY, data.train['Y'],cn)
toaa=overAllAccuracy(tcm)
pY=optm.predict(data.test['X'])
#print(pY.shape)
#print((np.asarray(data.test['Y'])).shape)
accuracy=metrics.accuracy_score(np.asarray(data.test['Y']), pY)
cm=calR(pY, data.test['Y'],cn)
oaa=overAllAccuracy(cm)
tt=time.time()
print('OT:%2d\tOAA:%.8f\tAcc:%.8f\tTOAA:%.8f\tTAc:%.8f\t%s\tT:%fs'%(test_run, oaa, accuracy, toaa, train_acc, str(type(optm).__name__),(tt-ct)))
sst.addFigure(oaa)
file_record=logfileForSklearnModel(file_record,test_run, optm, accuracy, oaa, cm, facepatchpreprocessdatafilename, train_acc, toaa, tcm)
#loss_a.setMinimun_loss(oaa)
modelname=m8_model_save_path.replace('.ckpt','_%s_.pkl'%(str(oaa)))
with open(modelname, 'wb') as fin:
pickle.dump(optm, fin, 4)
tt=time.time()
logf=log.replace('.txt',('_'+str(type(optm).__name__)+logpostfix+'.txt'))
filelog=open(logf,'a')
filelog.write('%s\t\t TotalTimeConsumed: %f\tOptimizer: %s\n'%(file_record, (tt-ct), str(type(optm).__name__)))
filelog.close()
return oaa
def savelistcontent(filename, list):
fw=open(filename, 'w')
for v in list:
fw.write('%s\n'%(str(v)))
fw.close()
def run(GPU_Device_ID, Module,
DataSet,ValidID,TestID,
NetworkType, runs
,cLR=0.0001,batchSize=15,loadONW=False,reshape=False):
try:
initialize_dirs()
'''GPU Option---------------------------------------------------------------------------------------------
Determine which GPU is going to be used
------------------------------------------------------------------------------------------------------------'''
print('GPU Option: %s'%(GPU_Device_ID))
if (0==GPU_Device_ID) or (1==GPU_Device_ID):
os.environ["CUDA_VISIBLE_DEVICES"]=str(GPU_Device_ID)
errorlog='./logs/errors_gpu'+str(GPU_Device_ID)+'.txt'
templog='./logs/templogs_newSC_gpu'+str(GPU_Device_ID)+'_M'+str(Module)+'_D'+str(DataSet)+'.txt'
else:
print("Usage: python finetune.py <GPUID> <Module> <NetworkType>\nGPUID must be 0 or 1\nModule must be 1, 2, or 3\nNetworkType must be 0, 1, 2, 3")
exit(-1)
'''GPU Option ENDS---------------------------------------------------------------------------------------'''
cn=7 # number of label categories
if int(DataSet)>60000:
cn=6
if int(DataSet)==66505:
cn=7
mini_loss=10000
loss_a=LOSS_ANA()
file_record=None
t1=time.time()
logprefix='./logs/'
model_save_path=''
labelshape=[None, cn]
m1shape= [None, 128, 128, 1]
global Mini_Epochs
#
#
#
'''Input Data-------------------------------------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------'''
#
##data set loading
#
D_f=False
if Module==2 and NetworkType<3:
D_f=True
dfile=Dataset_Dictionary.get(DataSet, False)
if dfile==False:
print('\nERROR: Unexpected DatasetID %d encountered.\n\n'%(int(DataSet)))
exit(-1)
logprefix="./logs/D%d_gpu"%(DataSet)
if Module==7:
print('Module 7: Face patches and Geometry')
elif Module==8:
print('Module 8: Face patches CNN outputs')
else:
if Module==2 and NetworkType>9:
data = DataSetPrepare.loadCKplus10gdata_v4(dfile, ValidID, TestID, Module=Module, Df=False,reshape=False, one_hot=False, cn=cn)
else:
#data = DataSetPrepare.loadCKplus10gdata_v2(dfile, ValidID, TestID, Df=D_f,reshape=reshape, cn=cn)
data = DataSetPrepare.loadCKplus10gdata_v4(dfile, ValidID, TestID, Module=Module, Df=D_f, reshape=reshape, cn=cn)
if DataSet==2:
print("Processing 8 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==3:
print("Processing 8 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==4:
print("Processing 8 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==5:
print("Processing 8 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==6:
m2d=258
print("Processing 8 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==7:
print("Processing 8 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==8:
print("Processing 8 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==9:
m1shape= [None, 224, 224, 1]
print("Processing 8 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==10:
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==11:
m1shape= [None, 224, 224, 1]
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==12:
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==13:
m2d=258
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==15:
dfilet=Dataset_Dictionary.get(10)
#datatest = DataSetPrepare.loadCKplus10gdata_v2(dfilet, ValidID, TestID, Df=D_f,reshape=reshape, cn=cn)
datatest = DataSetPrepare.loadCKplus10gdata_v4(dfilet, ValidID, TestID, Module=Module, Df=D_f,reshape=reshape, cn=cn)
print('Before reset: %d'%data.test.num_examples)
data.test.reset(datatest.test.res_images, datatest.test.geometry,
datatest.test.eyep, datatest.test.middlep, datatest.test.mouthp, datatest.test.innerf,
datatest.test.labels)
data.validation.reset(datatest.validation.res_images, datatest.validation.geometry,
datatest.validation.eyep, datatest.validation.middlep, datatest.validation.mouthp, datatest.validation.innerf,
datatest.validation.labels)
print('After reset: %d'%data.test.num_examples)
del datatest
batchSize=60
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==16:
dfilet=Dataset_Dictionary.get(10)
#datatest = DataSetPrepare.loadCKplus10gdata_v2(dfilet, ValidID, TestID, Df=D_f,reshape=reshape, cn=cn)
datatest = DataSetPrepare.loadCKplus10gdata_v4(dfilet, ValidID, TestID, Module=Module, Df=D_f,reshape=reshape, cn=cn)
print('Before reset: %d'%data.test.num_examples)
data.test.reset(datatest.test.res_images, datatest.test.geometry,
datatest.test.eyep, datatest.test.middlep, datatest.test.mouthp, datatest.test.innerf,
datatest.test.labels)
data.validation.reset(datatest.validation.res_images, datatest.validation.geometry,
datatest.validation.eyep, datatest.validation.middlep, datatest.validation.mouthp, datatest.validation.innerf,
datatest.validation.labels)
print('After reset: %d'%data.test.num_examples)
del datatest
if runs%2==0:
batchSize=30
else:
batchSize=15
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==17:
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==18:
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==19:
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==33:
batchSize=35
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==32:
dfilet=Dataset_Dictionary.get(33)
#datatest = DataSetPrepare.loadCKplus10gdata_v2(dfilet, ValidID, TestID, Df=D_f,reshape=reshape, cn=cn)
datatest = DataSetPrepare.loadCKplus10gdata_v4(dfilet, ValidID, TestID, Module=Module, Df=D_f,reshape=reshape, cn=cn)
print('Before reset: %d'%data.test.num_examples)
data.test.reset(datatest.test.res_images, datatest.test.geometry,
datatest.test.eyep, datatest.test.middlep, datatest.test.mouthp, datatest.test.innerf,
datatest.test.labels)
data.validation.reset(datatest.validation.res_images, datatest.validation.geometry,
datatest.validation.eyep, datatest.validation.middlep, datatest.validation.mouthp, datatest.validation.innerf,
datatest.validation.labels)
print('After reset: %d'%data.test.num_examples)
del datatest
batchSize=70
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==34:
dfilet=Dataset_Dictionary.get(33)
#datatest = DataSetPrepare.loadCKplus10gdata_v2(dfilet, ValidID, TestID, Df=D_f,reshape=reshape, cn=cn)
datatest = DataSetPrepare.loadCKplus10gdata_v4(dfilet, ValidID, TestID, Module=Module, Df=D_f,reshape=reshape, cn=cn)
print('Before reset: %d'%data.test.num_examples)
data.test.reset(datatest.test.res_images, datatest.test.geometry,
datatest.test.eyep, datatest.test.middlep, datatest.test.mouthp, datatest.test.innerf,
datatest.test.labels)
data.validation.reset(datatest.validation.res_images, datatest.validation.geometry,
datatest.validation.eyep, datatest.validation.middlep, datatest.validation.mouthp, datatest.validation.innerf,
datatest.validation.labels)
print('After reset: %d'%data.test.num_examples)
del datatest
batchSize=70
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==42:
dfilet=Dataset_Dictionary.get(40)
#datatest = DataSetPrepare.loadCKplus10gdata_v2(dfilet, ValidID, TestID, Df=D_f,reshape=reshape, cn=cn)
datatest = DataSetPrepare.loadCKplus10gdata_v4(dfilet, ValidID, TestID, Module=Module, Df=D_f,reshape=reshape, cn=cn)
print('Before reset: %d'%data.test.num_examples)
data.test.reset(datatest.test.res_images, datatest.test.geometry,
datatest.test.eyep, datatest.test.middlep, datatest.test.mouthp, datatest.test.innerf,
datatest.test.labels)
data.validation.reset(datatest.validation.res_images, datatest.validation.geometry,
datatest.validation.eyep, datatest.validation.middlep, datatest.validation.mouthp, datatest.validation.innerf,
datatest.validation.labels)
print('After reset: %d'%data.test.num_examples)
del datatest
batchSize=60
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==40:
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==43:
dfilet=Dataset_Dictionary.get(40)
#datatest = DataSetPrepare.loadCKplus10gdata_v2(dfilet, ValidID, TestID, Df=D_f,reshape=reshape, cn=cn)
datatest = DataSetPrepare.loadCKplus10gdata_v4(dfilet, ValidID, TestID, Module=Module, Df=D_f,reshape=reshape, cn=cn)
print('Before reset: %d'%data.test.num_examples)
data.test.reset(datatest.test.res_images, datatest.test.geometry,
datatest.test.eyep, datatest.test.middlep, datatest.test.mouthp, datatest.test.innerf,
datatest.test.labels)
data.validation.reset(datatest.validation.res_images, datatest.validation.geometry,
datatest.validation.eyep, datatest.validation.middlep, datatest.validation.mouthp, datatest.validation.innerf,
datatest.validation.labels)
print('After reset: %d'%data.test.num_examples)
del datatest
batchSize=60
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==111:
batchSize=30
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==222:
dfilet=Dataset_Dictionary.get(111)
#datatest = DataSetPrepare.loadCKplus10gdata_v2(dfilet, ValidID, TestID, Df=D_f,reshape=reshape, cn=cn)
datatest = DataSetPrepare.loadCKplus10gdata_v4(dfilet, ValidID, TestID, Module=Module, Df=D_f,reshape=reshape, cn=cn)
print('Before reset: %d'%data.test.num_examples)
data.test.reset(datatest.test.res_images, datatest.test.geometry,
datatest.test.eyep, datatest.test.middlep, datatest.test.mouthp, datatest.test.innerf,
datatest.test.labels)
data.validation.reset(datatest.validation.res_images, datatest.validation.geometry,
datatest.validation.eyep, datatest.validation.middlep, datatest.validation.mouthp, datatest.validation.innerf,
datatest.validation.labels)
print('After reset: %d'%data.test.num_examples)
del datatest
batchSize=30
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==333:
dfilet=Dataset_Dictionary.get(444)
#datatest = DataSetPrepare.loadCKplus10gdata_v2(dfilet, ValidID, TestID, Df=D_f,reshape=reshape, cn=cn)
datatest = DataSetPrepare.loadCKplus10gdata_v4(dfilet, ValidID, TestID, Module=Module, Df=D_f,reshape=reshape, cn=cn)
print('Before reset: %d'%data.test.num_examples)
data.test.reset(datatest.test.res_images, datatest.test.geometry,
datatest.test.eyep, datatest.test.middlep, datatest.test.mouthp, datatest.test.innerf,
datatest.test.labels)
data.validation.reset(datatest.validation.res_images, datatest.validation.geometry,
datatest.validation.eyep, datatest.validation.middlep, datatest.validation.mouthp, datatest.validation.innerf,
datatest.validation.labels)
print('After reset: %d'%data.test.num_examples)
del datatest
batchSize=30
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==444:
batchSize=30
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==501:
if runs%2==0:
batchSize=30
else:
batchSize=15
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==502:
dfilet=Dataset_Dictionary.get(501)
#datatest = DataSetPrepare.loadCKplus10gdata_v2(dfilet, ValidID, TestID, Df=D_f,reshape=reshape, cn=cn)
datatest = DataSetPrepare.loadCKplus10gdata_v4(dfilet, ValidID, TestID,Module=Module, Df=D_f,reshape=reshape, cn=cn)
print('Before reset: %d'%data.test.num_examples)
data.test.reset(datatest.test.res_images, datatest.test.geometry,
datatest.test.eyep, datatest.test.middlep, datatest.test.mouthp, datatest.test.innerf,
datatest.test.labels)
data.validation.reset(datatest.validation.res_images, datatest.validation.geometry,
datatest.validation.eyep, datatest.validation.middlep, datatest.validation.mouthp, datatest.validation.innerf,
datatest.validation.labels)
print('After reset: %d'%data.test.num_examples)
del datatest
if runs%2==0:
batchSize=30
else:
batchSize=15
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==503:
if runs%2==0:
batchSize=30
else:
batchSize=15
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==531:
if runs%2==0:
batchSize=15
else:
batchSize=30
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==532:
dfilet=Dataset_Dictionary.get(531)
#datatest = DataSetPrepare.loadCKplus10gdata_v2(dfilet, ValidID, TestID, Df=D_f,reshape=reshape, cn=cn)
datatest = DataSetPrepare.loadCKplus10gdata_v4(dfilet, ValidID, TestID, Module=Module, Df=D_f,reshape=reshape, cn=cn)
print('Before reset: %d'%data.test.num_examples)
data.test.reset(datatest.test.res_images, datatest.test.geometry,
datatest.test.eyep, datatest.test.middlep, datatest.test.mouthp, datatest.test.innerf,
datatest.test.labels)
data.validation.reset(datatest.validation.res_images, datatest.validation.geometry,
datatest.validation.eyep, datatest.validation.middlep, datatest.validation.mouthp, datatest.validation.innerf,
datatest.validation.labels)
print('After reset: %d'%data.test.num_examples)
del datatest
if runs%2==0:
batchSize=15
else:
batchSize=30
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==551:
if runs%2==0:
batchSize=21
else:
batchSize=42
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==552:
if runs%2==0:
batchSize=21
else:
batchSize=42
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==553:
if runs%2==0:
batchSize=21
else:
batchSize=42
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==554:
if runs%2==0:
batchSize=21
else:
batchSize=42
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==610:
if runs%3==0:
batchSize=35
elif runs%3==1:
batchSize=70
else:
batchSize=128
Mini_Epochs=Mini_Epochs*2
cLR=0.00001
print("Processing dataset>>>>>>>>\n%s"%(logprefix))
elif DataSet==611:
if runs%3==0:
batchSize=35
elif runs%3==1:
batchSize=70
else:
batchSize=128
Mini_Epochs=Mini_Epochs*2
cLR=0.00001
print("Processing dataset>>>>>>>>\n%s"%(logprefix))
elif DataSet==620:
if runs%3==0:
batchSize=35
elif runs%3==1:
batchSize=70
else:
batchSize=128
Mini_Epochs=Mini_Epochs*2
cLR=0.00001
print("Processing dataset>>>>>>>>\n%s"%(logprefix))
elif DataSet==621:
if runs%3==0:
batchSize=35
elif runs%3==1:
batchSize=70
else:
batchSize=128
Mini_Epochs=Mini_Epochs*2
cLR=0.00001
print("Processing dataset>>>>>>>>\n%s"%(logprefix))
elif DataSet==1001:
if runs%2==0:
batchSize=30
else:
batchSize=15
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==1002:
dfilet=Dataset_Dictionary.get(1001)
datatest = DataSetPrepare.loadCKplus10gdata_v2(dfilet, ValidID, TestID, Df=D_f,reshape=reshape, cn=cn)
print('Before reset: %d'%data.test.num_examples)
data.test.reset(datatest.test.res_images, datatest.test.geometry,
datatest.test.eyep, datatest.test.middlep, datatest.test.mouthp, datatest.test.innerf,
datatest.test.labels)
data.validation.reset(datatest.validation.res_images, datatest.validation.geometry,
datatest.validation.eyep, datatest.validation.middlep, datatest.validation.mouthp, datatest.validation.innerf,
datatest.validation.labels)
print('After reset: %d'%data.test.num_examples)
if runs%2==0:
batchSize=30
else:
batchSize=30
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==66501:
batchSize=30
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==66502:
dfilet=Dataset_Dictionary.get(66501)
#datatest = DataSetPrepare.loadCKplus10gdata_v2(dfilet, ValidID, TestID, Df=D_f,reshape=reshape, cn=cn)
datatest = DataSetPrepare.loadCKplus10gdata_v4(dfilet, ValidID, TestID,Module=Module, Df=D_f,reshape=reshape, cn=cn)
print('Before reset: %d'%data.test.num_examples)
data.test.reset(datatest.test.res_images, datatest.test.geometry,
datatest.test.eyep, datatest.test.middlep, datatest.test.mouthp, datatest.test.innerf,
datatest.test.labels)
data.validation.reset(datatest.validation.res_images, datatest.validation.geometry,
datatest.validation.eyep, datatest.validation.middlep, datatest.validation.mouthp, datatest.validation.innerf,
datatest.validation.labels)
print('After reset: %d'%data.test.num_examples)
del datatest
batchSize=30
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==66503:
cLR=0.001
batchSize=30
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==66504:
#cLR=0.001
batchSize=30
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==66505:
#cLR=0.001
batchSize=30
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==66531:
batchSize=30
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==66532:
dfilet=Dataset_Dictionary.get(66531)
#datatest = DataSetPrepare.loadCKplus10gdata_v2(dfilet, ValidID, TestID, Df=D_f,reshape=reshape, cn=cn)
datatest = DataSetPrepare.loadCKplus10gdata_v4(dfilet, ValidID, TestID, Module=Module, Df=D_f,reshape=reshape, cn=cn)
print('Before reset: %d'%data.test.num_examples)
data.test.reset(datatest.test.res_images, datatest.test.geometry,
datatest.test.eyep, datatest.test.middlep, datatest.test.mouthp, datatest.test.innerf,
datatest.test.labels)
data.validation.reset(datatest.validation.res_images, datatest.validation.geometry,
datatest.validation.eyep, datatest.validation.middlep, datatest.validation.mouthp, datatest.validation.innerf,
datatest.validation.labels)
print('After reset: %d'%data.test.num_examples)
del datatest
batchSize=30
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==66554:
batchSize=30
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==66555:
batchSize=30
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==66610:
if runs%2==0:
batchSize=30
elif runs%2==1:
batchSize=60
cLR=0.00001
print("Processing dataset>>>>>>>>\n%s"%(logprefix))
elif DataSet==66611:
if runs%2==0:
batchSize=30
elif runs%2==1:
batchSize=60
cLR=0.00001
print("Processing dataset>>>>>>>>\n%s"%(logprefix))
elif DataSet==66620:
if runs%2==0:
batchSize=30
elif runs%2==1:
batchSize=60
cLR=0.00001
print("Processing dataset>>>>>>>>\n%s"%(logprefix))
elif DataSet==66621:
if runs%2==0:
batchSize=30
elif runs%2==1:
batchSize=60
cLR=0.00001
print("Processing dataset>>>>>>>>\n%s"%(logprefix))
else:
print('ERROR: Unexpected Dataset ID')
exit()
#
lrstep=int(data.train.num_examples/batchSize*times)
print('\nlearning rate decay steps: %d'%lrstep)
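# For example (illustrative numbers): 3000 training examples with batchSize=30
# and times=20 give lrstep = 2000 iterations, i.e. one learning-rate decay
# every 20 epochs.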
#
tt=time.time()
if reshape:
logprefix=logprefix+'_reshape64x64'
if Module==6:
log=logprefix+str(GPU_Device_ID)+"_M"+str(Module)+"_D"+str(DataSet)+"_N"+str(NetworkType)+"_newStopCriteriaV3.txt"
elif loadONW:
log=logprefix+str(GPU_Device_ID)+"_M"+str(Module)+"_D"+str(DataSet)+"_N"+str(NetworkType)+"_withPretrainModelWeight_newStopCriteriaV3.txt"
else:
log=logprefix+str(GPU_Device_ID)+"_M"+str(Module)+"_D"+str(DataSet)+"_N"+str(NetworkType)+"_noPretrain_newStopCriteriaV3.txt"
#logfilename=time.strftime('%Y%m%d%H%M%S',time.localtime(tt))+str(sys.argv[2:4])
print('Time used for loading data: %fs'%(tt-t1))
if os.path.exists("J:/Models/saves/"):
model_save_path=("J:/Models/saves/"+'M'+str(Module)+'/D'+str(DataSet)+'/N'+str(NetworkType)+'/')
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
model_save_path=(model_save_path+'D'+str(DataSet)+'_M'+str(Module)+'_N'+str(NetworkType)+'_T'+str(TestID)+'_V'+str(ValidID)+'_R'
+str(runs)+time.strftime('_%Y%m%d%H%M%S',time.localtime(t1))+".ckpt")
else:
model_save_path=("./saves/"+'M'+str(Module)+'/D'+str(DataSet)+'/N'+str(NetworkType)+'/')
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
model_save_path=(model_save_path+'D'+str(DataSet)+'_M'+str(Module)+'_N'+str(NetworkType)+'_T'+str(TestID)+'_V'+str(ValidID)+'_R'
+str(runs)+time.strftime('_%Y%m%d%H%M%S',time.localtime(t1))+".ckpt")
'''Input Data Ends-----------------------------------------------------------------------------------------'''
#
#
#
if reshape:
m1shape=[None, 64, 64, 1]
print('Module 1 images input shape has been set to %s'%str(m1shape))
model_save_path=model_save_path.replace(".ckpt", "_reshape.ckpt")
#
#
#
if Module==1 and (NetworkType==10 or NetworkType==4):# parentheses added: the original `and`/`or` mix grouped as (Module==1 and NetworkType==10) or NetworkType==4
cLR=0.00002
if not loadONW:
lrstep=14000
global_step = tf.Variable(0, trainable=False)
lr=tf.train.exponential_decay(cLR, global_step, lrstep, lr_drate, staircase=True)
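# Decay sketch: with staircase=True the effective rate is
# cLR * lr_drate ** (global_step // lrstep); e.g. cLR=1e-4, lr_drate=0.8 and
# lrstep=14000 give 1e-4 for steps 0..13999, 8e-5 for 14000..27999, and so on.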
if Module==1:
stcmwvlilttv=1.19 # save the current model when the validation loss is less than this value
if DataSet==554 or DataSet==551 or DataSet==552 or DataSet==553:
stcmwvlilttv=1.7
'''MODULE1----------------------------------------------------------------------------------------------------
Options for the whole-face-network
Only need to select one of the import options as the network for the whole face feature extraction.
-------------------------------------------------------------------------------------------------------------'''
print('Network Type: %s'%(NetworkType))
if NetworkType==0:
from VGG_NET import VGG_NET_20l_512o as WFN
elif NetworkType==1:
from VGG_NET import VGG_NET_20l_128o as WFN
elif NetworkType==2:
from VGG_NET import VGG_NET_16l_128o as WFN
elif NetworkType==3:
from VGG_NET import VGG_NET_16l_72o as WFN
elif NetworkType==4:
from VGG_NET import VGG_NET_o as WFN
elif NetworkType==8:
from VGG_NET import VGG_NET_Inception1 as WFN
elif NetworkType==9:
from VGG_NET import VGG_NET_Inception2 as WFN
elif NetworkType==10:
from VGG_NET import VGG_NET_O_tfl as WFN
elif NetworkType==11:
from VGG_NET import VGG_NET_I5 as WFN
elif NetworkType==12:
from VGG_NET import VGG_NET_I5_ELU as WFN
else:
print("Usage: python finetune.py <GPUID> <Module> <NetworkType>\nWith Module 1, NetworkType must be 0, 1, 2, 3")
exit(-1)
'''Here begins the implementation logic-------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------'''
#Holder for gray images with m1shape in a batch size of batch_size
images = tf.placeholder(tf.float32, m1shape)
#Holder for labels in a batch size of batch_size, number of labels are to be determined
labels = tf.placeholder(tf.float32, labelshape)#the number of labels are to be determined
if NetworkType==10 or NetworkType==11 or NetworkType==12:
Mini_Epochs = 40
softmax=WFN(images)
else:
whole_face_net = WFN({'data':images})
softmax=whole_face_net.layers['prob']
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=softmax),0)
#optm=tf.train.RMSPropOptimizer(lr)
optm=tf.train.AdamOptimizer(lr)
train_op=optm.minimize(loss,global_step=global_step)#for train
#for test
correcta_prediction = tf.equal(tf.argmax(softmax,1),tf.argmax(labels,1))
test_cast=tf.cast(correcta_prediction, "float")
sum_test=tf.reduce_sum(test_cast)#for large test set
accuracy = tf.reduce_mean(test_cast)#for small test set
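# sum_test counts correct predictions so that large sets can be scored in
# chunks, while accuracy averages them directly for small sets;
# Valid_on_TestSet switches between the two based on TestNumLimit.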
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if loadONW:
if NetworkType==10 or NetworkType==11 or NetworkType==12:
restorevggModel(sess, NetworkType, tf.get_default_graph())
else:
loadPretrainedModel(NetworkType, whole_face_net, sess,Module)
print('Model has been restored.\n')
#exit(-1)
saver = tf.train.Saver()
iters=int((data.train.num_examples*Mini_Epochs)/batchSize)+1
for i in range(iters):
afc=[]
batch=data.train.next_batch(batchSize, shuffle=False)
tloss, _=sess.run([loss, train_op], feed_dict={images:batch[0], labels:batch[5]})
if tloss<mini_loss:
mini_loss=tloss
v_accuracy, valid_loss, oaa, confu_mat = Valid_on_TestSet(cn, sess, accuracy, sum_test, loss, softmax,
images, data.test.res_images, labels, data.test.labels,afc=afc)
laflag = loss_a.analyzeLossVariation(valid_loss)
clr=cLR*(lr_drate)**(i//lrstep)
tt=time.time()
print("CLR:%.8f Ite:%06d Bs:%03d Epo:%04d Los:%.8f mLo:%08f\tVALID>> mVL: %.8f\tVL: %.8f\tVA: %f\tOAA: %f\tT: %fs"%
(clr,i,batchSize,data.train.epochs_completed, tloss, mini_loss, loss_a.minimun_loss, valid_loss, v_accuracy, oaa, (tt-t1)))
if laflag:
file_record = logfile(file_record, runs=runs, OAA=oaa, afc=afc, valid_loss=valid_loss, valid_min_loss=loss_a.minimun_loss,
final_train_loss=tloss, train_min_loss=mini_loss, TA=v_accuracy, TC=(tt-t1),ILR=cLR, FLR=clr, LS=lrstep, ites=i,
Epo=data.train.epochs_completed, cBS=batchSize, iBS=batchSize,
input=sys.argv, CM=confu_mat, T=time.localtime(tt), df=dfile)
if loss_a.minimun_loss < stcmwvlilttv:
saver.save(sess=sess, save_path=model_save_path)
'''MODULE1 ENDS---------------------------------------------------------------------------------------------'''
#
#
#
elif Module==2:
#stcmwvlilttv=1.1854#value need to be determined. save_the_current_model_when_validation_loss_is_less_than_this_value
#'''MODULE2----------------------------------------------------------------------------------------------------
#Options for the Geometry-network
#Only need to select one of the import options as the network for the geometry feature extraction.
#-------------------------------------------------------------------------------------------------------------'''
#print('Geometry Network Type: %s'%(NetworkType))
#if NetworkType==0:
# from Geometric_NET import Geometric_NET_2c2l as GeN
#elif NetworkType==1:
# from Geometric_NET import Geometric_NET_2c2lcc1 as GeN
#elif NetworkType==2:
# from Geometric_NET import Geometric_NET_2c2lcc1l1 as GeN
#elif NetworkType==3:
# from Geometric_NET import Geometric_NET_1h as GeN
#elif NetworkType==4:
# from Geometric_NET import Geometric_NET_2h1I as GeN
#elif NetworkType==5:
# from Geometric_NET import Geometric_NET_3h1I as GeN
# clr=0.00001
# learningRate=0.00001
#elif NetworkType==6:
# from Geometric_NET import Geometric_NET_h1I as GeN
#else:
# print("Usage: python finetune.py <GPUID> <Module> <NetworkType>\nWith Module 2, NetworkType must be 0, 1, 2")
# exit(-1)
#'''Here begins the implementation logic-------------------------------------------------------------------
#-------------------------------------------------------------------------------------------------------------'''
##Holder for geometry features with 122 in a batch size of batch_size
#if D_f:
# geo_features = tf.placeholder(tf.float32, [None, m2d, 1])
#else:
# geo_features = tf.placeholder(tf.float32, [None, m2d])
##Holder for labels in a batch size of batch_size, number of labels are to be determined
#labels = tf.placeholder(tf.float32, labelshape)#the number of labels are to be determined
#Geometry_net = GeN({'data':geo_features})
#print(type(Geometry_net))
#softmax=tf.nn.softmax('prob')
#loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=softmax),0)
#optm=tf.train.RMSPropOptimizer(lr)
##optm=tf.train.RMSPropOptimizer(lr)
#train_op=optm.minimize(loss)#for train
##for test
#correcta_prediction = tf.equal(tf.argmax(softmax,1),tf.argmax(labels,1))
#accuracy = tf.reduce_mean(tf.cast(correcta_prediction, "float"))
#with tf.Session() as sess:
# sess.run(tf.global_variables_initializer())
# saver = tf.train.Saver()
#'''MODULE2 ENDS---------------------------------------------------------------------------------------------'''
from sklearn import metrics
'''MODULE2----------------------------------------------------------------------------------------------------
Options for the Geometry features
-------------------------------------------------------------------------------------------------------------'''
print('Network Type: %s'%(NetworkType))
'''Here begins the implementation logic-------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------'''
overtimes=1
if continue_test:
overtimes=OverTimes
nel=[7, 10, 14, 18, 21, 25, 28, 32]
mssl=[4, 8, 10, 14, 18, 21, 27, 32]
msll=[1, 2, 3, 5, 8, 10, 14, 18, 24, 27]
loopflag=False
log=log.replace('./logs','./logs/M%dtests'%(Module))#use for tuning
for v_nel in nel:
if loopflag:
break
if NetworkType==10 or NetworkType==11:
loopflag=True
for v_mss in mssl:
for v_msl in msll:
n_estimators=v_nel#10, estimators for random forest classifier
min_samples_split=v_mss#10
min_samples_leaf=v_msl#5
#n_estimators=14#10, estimators for random forest classifier
#min_samples_split=10#10
#min_samples_leaf=5#5
print('n_estimators(RFC):%d\tmin_samples_split:%d\tmin_samples_leaf:%d'%(n_estimators,
min_samples_split, min_samples_leaf))
sst=SIMSTS(overtimes)
for test_run in range(overtimes):
ct=time.time()
m7_model_save_path=model_save_path.replace('_R'+str(runs)+time.strftime('_%Y%m%d%H%M%S',time.localtime(t1)),
'_R'+str(test_run)+time.strftime('_%Y%m%d%H%M%S',time.localtime(ct)))
if NetworkType==10:
from sklearn import tree
optm = tree.DecisionTreeClassifier(criterion='entropy', min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf)
logpostfix=''
elif NetworkType==11:
from sklearn import tree
optm = tree.DecisionTreeClassifier(criterion='gini', min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf)
logpostfix=''
elif NetworkType==12:
from sklearn.ensemble import RandomForestClassifier
optm = RandomForestClassifier(n_estimators=n_estimators, criterion='entropy',
min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf)
logpostfix='_E%d'%(n_estimators)
elif NetworkType==13:
from sklearn.ensemble import RandomForestClassifier
optm = RandomForestClassifier(n_estimators=n_estimators, criterion='gini',
min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf)
logpostfix='_E%d'%(n_estimators)
else:
print('ERROR:::::$$$$: Unexpected NetworkType encountered.')
exit(-1)
logpostfix=logpostfix+'_MSS%d_MSL%d_'%(min_samples_split, min_samples_leaf)
m7_model_save_path=m7_model_save_path.replace('.ckpt', '_%s.ckpt'%(type(optm).__name__))
#print(type(data.train.geometry))
#print(len(data.train.geometry))
#print(type(data.train.labels))
#print(data.train.labels.shape)
optm.fit(data.train.geometry, data.train.labels)
tY=optm.predict(data.train.geometry)
train_acc=metrics.accuracy_score(data.train.labels, tY)
tcm=calR(tY, data.train.labels)
toaa=overAllAccuracy(tcm)
pY=optm.predict(data.test.geometry)
accuracy=metrics.accuracy_score(data.test.labels, pY)
cm=calR(pY, data.test.labels)
oaa=overAllAccuracy(cm)
tt=time.time()
print('OT:%2d\tOAA:%.8f\tAcc:%.8f\tTOAA:%.8f\tTAc:%.8f\t%s\tT:%fs'%(test_run, oaa, accuracy, toaa, train_acc, str(type(optm).__name__),(tt-ct)))
sst.addFigure(oaa)
file_record=logfileForSklearnModel(file_record,test_run, optm, accuracy, oaa, cm, dfile, train_acc, toaa, tcm)
loss_a.setMinimun_loss(oaa)
modelname=m7_model_save_path.replace('.ckpt','_%s_.pkl'%(str(oaa)))
with open(modelname, 'wb') as fin:
pickle.dump(optm, fin, 4)
tt=time.time()
logf=log.replace('.txt',('_'+str(type(optm).__name__)+logpostfix+'.txt'))
filelog=open(logf,'a')
filelog.write('%s\t\t TotalTimeConsumed: %f\tOptimizer: %s\n'%(file_record, (tt-ct), str(type(optm).__name__)))
filelog.close()
state=sst.getSTS()
print('Mean:%f\tMax:%f\tMin:%f'%(state[0], state[1], state[2]))
sst.logfile(Module, DataSet, NetworkType, n_estimators, min_samples_split, min_samples_leaf)
'''MODULE2 ENDS---------------------------------------------------------------------------------------------'''
#
#
#
elif Module==3:
stcmwvlilttv=1.2154 # value to be determined: save the current model when the validation loss is less than this value
if DataSet==502 or DataSet==501:
stcmwvlilttv=1.1854
elif DataSet==532 or DataSet==531:
stcmwvlilttv=1.1904
elif DataSet==554 or DataSet==551 or DataSet==552 or DataSet==553:
stcmwvlilttv=1.7
elif DataSet>60000:
stcmwvlilttv=1.045
'''MODULE3----------------------------------------------------------------------------------------------------
Options for the face_patches-network
-------------------------------------------------------------------------------------------------------------'''
print('FacePatch Network Type: %s'%(NetworkType))
if NetworkType==0:
from FacePatches_NET import FacePatches_NET_2Inceptions as PaN
elif NetworkType==1:
from FacePatches_NET import FacePatches_NET_2Inceptions_4lrn as PaN
elif NetworkType==2:
from FacePatches_NET import FacePatches_NET_2Inceptions_4lrn2 as PaN
elif NetworkType==3:
from FacePatches_NET import FacePatches_NET_3Conv_2Inception as PaN
elif NetworkType==4:
#from FacePatches_NET import FacePatches_NET_3Conv_1Inception as PaN
from FacePatches_NET import FacePatches_NET_3Conv_IInception_tflear as PaN
elif NetworkType==5:
from FacePatches_NET import FacePatches_NET_3Conv_3Inception_tflearn_5 as PaN
stcmwvlilttv=0.00022
elif NetworkType==6:
from FacePatches_NET import FacePatches_NET_3Conv_3Inception_tflearn as PaN
elif NetworkType==7:
from FacePatches_NET import FacePatches_NET_3Conv_3Inception_tflearn_ELU as PaN
elif NetworkType==8:
from FacePatches_NET import FacePatches_NET_3Conv_3Inception_tflearn_8 as PaN
elif NetworkType==9:
from FacePatches_NET import FacePatches_NET_3Conv_3Inception_tflearn_9 as PaN
elif NetworkType==10:
from FacePatches_NET import FacePatches_NET_3Conv_3Inception_tflearn_10 as PaN
elif NetworkType==11:
from FacePatches_NET import FacePatches_NET_3Conv_3Inception_tflearn_11 as PaN
elif NetworkType==12:
from FacePatches_NET import FacePatches_NET_3Conv_3Inception_tflearn_12 as PaN
elif NetworkType==24:
from FacePatches_NET import FacePatches_NET_3C_1I_2P as PaN
elif NetworkType==25:
from FacePatches_NET import FacePatches_NET_3C_2I_2P as PaN
elif NetworkType==26:
from FacePatches_NET import FacePatches_NET_3C_3I_2P as PaN
else:
print("Usage: python finetune.py <GPUID> <Module> <NetworkType>\nWith Module 2, NetworkType must be 0, 1")
exit(-1)
'''Here begins the implementation logic-------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------'''
#Holders for gray images
eye_p_shape=[None, 26, 64, 1]
midd_p_shape=[None, 49, 28, 1]
mou_p_shape=[None, 30, 54, 1]
eye_p = tf.placeholder(tf.float32, eye_p_shape)
midd_p = tf.placeholder(tf.float32, midd_p_shape)
mou_p = tf.placeholder(tf.float32, mou_p_shape)
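#Patch tensors are laid out as [batch, height, width, channels]: eye patches 26x64,
#middle patches 49x28, mouth patches 30x54, all single-channel grayscale.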
#Holder for labels in a batch of batch_size; the number of labels is determined by labelshape
labels = tf.placeholder(tf.float32, labelshape)
#FacePatch_net = PaN({'eyePatch_data':eye_p, 'middlePatch_data':midd_p, 'mouthPatch_data':mou_p})
#print(type(FacePatch_net))
#softmax=FacePatch_net.layers['prob']
if NetworkType > 3 and NetworkType < 13:### NetworkTypes 4-12 take the placeholders directly (tflearn-style)
softmax=PaN(eye_p, midd_p, mou_p, classNo=cn)
elif NetworkType >23 and NetworkType <27:###using only eye patch and mouth patch
softmax=PaN(eye_p, mou_p, classNo=cn)
else:
FacePatch_net = PaN({'eyePatch_data':eye_p, 'middlePatch_data':midd_p, 'mouthPatch_data':mou_p})
print(type(FacePatch_net))
softmax=FacePatch_net.layers['prob']
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=softmax),0)
#optm=tf.train.RMSPropOptimizer(lr)
optm=tf.train.AdamOptimizer(lr)
train_op=optm.minimize(loss, global_step)#for train
#for test
correct_prediction = tf.equal(tf.argmax(softmax,1),tf.argmax(labels,1))
test_cast=tf.cast(correct_prediction, "float")
sum_test=tf.reduce_sum(test_cast)#for large test set
accuracy = tf.reduce_mean(test_cast)#for small test set
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if loadONW:
#print('\n\n>>>>>>>>>>>>>>all collection keys')
#print(tf.get_default_graph().get_all_collection_keys())
#savelistcontent('./M3_all_collection_keys.txt',tf.get_default_graph().get_all_collection_keys())
#print('\n\n>>>>>>>>>>>>>>all variables')
#print(tf.get_default_graph().get_collection(name='variables'))
#savelistcontent('./M3_all_variables.txt',tf.get_default_graph().get_collection(name='variables'))
#print('\n\n>>>>>>>>>>>>>>all train_op')
#print(tf.get_default_graph().get_collection(name='train_op'))
#savelistcontent('./M3_train_op.txt',tf.get_default_graph().get_collection(name='train_op'))
#print('\n\n>>>>>>>>>>>>>>all trainable variables')
#print(tf.get_default_graph().get_collection(name='trainable_variables'))
#savelistcontent('./M3_trainable_variables_n5.txt', tf.get_default_graph().get_collection(name='trainable_variables'))
#exit(2)
restorefacepatchModel(DataSet, sess, NetworkType, tf.get_default_graph())
print('\nModels have been loaded.\n')
iters=int((data.train.num_examples*Mini_Epochs)/batchSize)+1
for i in range(iters):
afc=[]
batch=data.train.next_batch(batchSize, shuffle=False)
tloss, _=sess.run([loss, train_op], feed_dict={eye_p:batch[2], midd_p:batch[3], mou_p:batch[4], labels:batch[5]})
if tloss<mini_loss:
mini_loss=tloss
v_accuracy, valid_loss, oaa, confu_mat = Valid_on_TestSet_3NI(cn, sess, accuracy, sum_test, loss, softmax,
eye_p, data.test.eyep, midd_p, data.test.middlep,
mou_p, data.test.mouthp, labels, data.test.labels, afc=afc)
laflag = loss_a.analyzeLossVariation(valid_loss)
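#clr below presumably mirrors tf.train.exponential_decay(cLR, global_step, lrstep, lr_drate,
#staircase=True), so the printed value tracks the learning rate the optimizer actually uses.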
clr=cLR*(lr_drate)**(i//lrstep)
tt=time.time()
print("CLR:%.8f Ite:%06d Bs:%03d Epo:%04d Los:%.8f mLo:%08f\tVALID>> mVL: %.8f\tVL: %.8f\tVA: %f\tOAA: %f\tT: %fs"%
(clr,i,batchSize,data.train.epochs_completed, tloss, mini_loss, loss_a.minimun_loss, valid_loss, v_accuracy, oaa, (tt-t1)))
if laflag:
file_record = logfile(file_record, runs=runs, OAA=oaa, afc=afc, valid_loss=valid_loss, valid_min_loss=loss_a.minimun_loss,
final_train_loss=tloss, train_min_loss=mini_loss, TA=v_accuracy, TC=(tt-t1),ILR=cLR, FLR=clr, LS=lrstep, ites=i,
Epo=data.train.epochs_completed, cBS=batchSize, iBS=batchSize,
input=sys.argv, CM=confu_mat, T=time.localtime(tt), df=dfile)
if loss_a.minimun_loss < stcmwvlilttv:
saver.save(sess=sess, save_path=model_save_path)
'''MODULE3 ENDS---------------------------------------------------------------------------------------------'''
#
#
#
elif Module==6:
stcmwvlilttv=1.4054#value needs to be determined; save_the_current_model_when_validation_loss_is_less_than_this_value
'''MODULE6----------------------------------------------------------------------------------------------------
Options for the fusion net of vgg inner_face and geometry input
-------------------------------------------------------------------------------------------------------------'''
print('Network Type: %s'%(NetworkType))
if NetworkType==440:
from Geometric_NET import Geometric_NET_2h1I as GEON
geonfcdim=1024
from VGG_NET import VGG_NET_o as APPN
appnfcdim=4096
from FintuneNet import FTN0 as FTN
elif NetworkType==441:
from Geometric_NET import Geometric_NET_2h1I as GEON
geonfcdim=1024
from VGG_NET import VGG_NET_o as APPN
appnfcdim=4096
from FintuneNet import FTN1 as FTN
else:
print("Usage: python finetune.py <GPUID> <Module> <NetworkType>\nWrong NetworkType, please check the NetworkType input again.")
exit(-1)
'''Here begins the implementation logic-------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------'''
#define geometry graph
geo_G=tf.Graph()
with geo_G.as_default():
geo_features=tf.placeholder(tf.float32, [None,122])
geo_net=GEON({'data':geo_features})
geofc=geo_net.layers['gefc2']
#print(geo_G.get_all_collection_keys())
#print(geo_G.get_collection(name='trainable_variables'))
#print(geo_G.get_collection(name='variables'))
gsaver = tf.train.Saver()
#exit()
#define appearance graph
app_G=tf.Graph()
with app_G.as_default():
images = tf.placeholder(tf.float32, m1shape)
app_net=APPN({'data':images})
appfc=app_net.layers['fc2']
asaver = tf.train.Saver()
#define fine-tuning graph
fint_G=tf.Graph()
with fint_G.as_default():
geo_fc=tf.placeholder(tf.float32, [None, geonfcdim])
app_fc=tf.placeholder(tf.float32, [None, appnfcdim])
labels = tf.placeholder(tf.float32, labelshape)#the number of labels is determined by labelshape
fin_net=FTN({'appfc':app_fc, 'geofc':geo_fc})
softmax=fin_net.layers['prob']
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=softmax),0)
optm=tf.train.RMSPropOptimizer(lr)
train_op=optm.minimize(loss)#for train
#for test
correct_prediction = tf.equal(tf.argmax(softmax,1),tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
#print(fint_G.get_all_collection_keys())
#print(fint_G.get_collection(name='variables'))
#print(fint_G.get_collection(name='train_op'))
#print(fint_G.get_collection(name='trainable_variables'))
#exit()
print('Geometry graph at: \t\t', geo_G)
print('Appearance graph at: \t\t', app_G)
print('Fine-tuning graph at: \t\t', fint_G)
#exit()
#each session is bound to its own graph
geo_sess=tf.InteractiveSession(graph=geo_G)
app_sess=tf.InteractiveSession(graph=app_G)
fin_sess=tf.InteractiveSession(graph=fint_G)
print('\n%%%%%%%Sessions are created\n')
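#Each pretrained sub-network lives in its own tf.Graph, so each needs its own session.
#The intended flow appears to be: evaluate geofc/appfc in their own sessions, then feed the
#resulting numpy arrays into the geo_fc/app_fc placeholders of the fine-tuning graph.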
try:
#must initialize the variables in the graph before computation or before loading pretrained weights
geo_sess.run(tf.variables_initializer(var_list=geo_G.get_collection(name='variables')))
print('\nGeometry network variables initialized.')
#the gsaver must be defined in the graph of its owner session, or errors will occur during restoration or saving
gsaver.restore(sess=geo_sess, save_path=selectGeoModelPathForModule6_8G(TestID=TestID))
print('Geometry Model loaded')
except:
print('Unable to load the pretrained network for geo_net')
traceback.print_exc()
try:
#must initialize the variables in the graph before computation or before loading pretrained weights
app_sess.run(tf.variables_initializer(var_list=app_G.get_collection(name='variables')))
print('\nAppearance network variables initialized.')
#the asaver must be defined in the graph of its owner session, or errors will occur during restoration or saving
asaver.restore(sess=app_sess, save_path=selectAppModelPathForModule6_8G(TestID=TestID))
print('Appearance Model loaded\n')
except:
print('Unable to load the pretrained network for app_net')
traceback.print_exc()
exit(2)
#exit()
try:
#besides the variables, the optimizer state also needs to be initialized.
#fin_sess.run(tf.variables_initializer(var_list=fint_G.get_collection(name='trainable_variables')))
fin_sess.run(tf.variables_initializer(var_list=fint_G.get_collection(name='variables')))
saver = tf.train.Saver()
print('\nFine-tuning network variables initialized.')
except:
print('Unable to initialize Fine-tuning network variables')
traceback.print_exc()
exit(3)
'''MODULE6 ENDS---------------------------------------------------------------------------------------------'''
#
#
#Module 7: fusion of face-patch CNN features and raw geometry features
elif Module==7:
from sklearn import metrics
stcmwvlilttv=1.4054#value needs to be determined; save_the_current_model_when_validation_loss_is_less_than_this_value
'''MODULE7----------------------------------------------------------------------------------------------------
Options for the fusion net of face patches and geometry input
-------------------------------------------------------------------------------------------------------------'''
print('Network Type: %s'%(NetworkType))
if NetworkType//10==6:#use network 6 from the face patches module and take the fusion_1 layer output
from FacePatches_NET import FacePatches_NET_3Conv_3Inception_tflearn as FPN
fpndim=9526
#m3modelname='./M7models/D502_M3_N6_T2_V2_R1_20171110055149_1.18062_.ckpt'
m3modelname='./M7models/D502_M3_N6_T2_V2_R1_20171110055149_1.18062_.ckpt'
facepatchpreprocessdatafilename='./Pre-Datasets/D%d_N%dinM3_pre-data_with_%ddims_from_%s.pkl'%(DataSet,6,fpndim,os.path.basename(m3modelname))
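#The cache filename encodes dataset ID, network ID, feature dimensionality and the source
#checkpoint name, so features generated from a different model do not collide with old caches.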
else:
print("Usage: python finetune.py <GPUID> <Module> <NetworkType>\nWrong NetworkType, please check the NetworkType input again.")
exit(-1)
'''Here begins the implementation logic-------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------'''
###load cached pre-computed features if available, otherwise generate them
print('Checking path:\n%s\n'%(facepatchpreprocessdatafilename))
if os.path.exists(facepatchpreprocessdatafilename):
print('Loading data from previously generated file...')
with open(facepatchpreprocessdatafilename, 'rb') as datafile:
Apredata=pickle.load(datafile)
else:
print('Generating data......')
#define appearance graph
fp_G=tf.Graph()
with fp_G.as_default():
eye_p_shape=[None, 26, 64, 1]
midd_p_shape=[None, 49, 28, 1]
mou_p_shape=[None, 30, 54, 1]
eye_p = tf.placeholder(tf.float32, eye_p_shape)
midd_p = tf.placeholder(tf.float32, midd_p_shape)
mou_p = tf.placeholder(tf.float32, mou_p_shape)
softmax=FPN(eye_p, midd_p, mou_p)
fpsaver = tf.train.Saver()
fusion1=tflearn.get_layer_by_name('fusion_1')
print('Facepatches graph at: \t\t', fp_G)
#exit()
#each session is bound to its own graph
fp_sess=tf.InteractiveSession(graph=fp_G)
print('\n%%%%%%%Sessions are created\n')
try:
#must initialize the variables in the graph before computation or before loading pretrained weights
fp_sess.run(tf.variables_initializer(var_list=fp_G.get_collection(name='variables')))
print('\nFace Patches network variables initialized.')
#the fpsaver must be defined in the graph of its owner session, or errors will occur during restoration or saving
fpsaver.restore(sess=fp_sess, save_path=m3modelname)
print('Face Patches Network Model loaded')
except:
print('Unable to load the pretrained network for the face patches net')
traceback.print_exc()
exit()
data10g=DataSetPrepare.loadPKLDataWithPartitions_v4(Dataset_Dictionary.get(DataSet), Geometry=True, Patches=True, cn=cn)
Apredata=[]
print('Data contains %d groups.'%len(data10g))
for dg in data10g:
predata={'X':[], 'Y':[]}
fpeval=[]
ncount=len(dg['labels'])
print('Processing data with %d samples.'%(ncount))
#print(dg['eye_patch'][0].shape)
if ncount>TestNumLimit:
iters=np.floor_divide(ncount, test_bat)
print(iters)
for ite in range(iters):
#print(ite)
start=test_bat*ite
end=test_bat*(ite+1)
fcd=fusion1.eval(feed_dict={eye_p:dg['eye_patch'][start:end], midd_p:dg['middle_patch'][start:end],
mou_p:dg['mouth_patch'][start:end]})
fpeval.extend(fcd)
del fcd
if ncount%test_bat>0:
fcd=fusion1.eval(feed_dict={eye_p:dg['eye_patch'][test_bat*iters:ncount], midd_p:dg['middle_patch'][test_bat*iters:ncount],
mou_p:dg['mouth_patch'][test_bat*iters:ncount]})
fpeval.extend(fcd)
del fcd
else:
fcd=fusion1.eval(feed_dict={eye_p:dg['eye_patch'], midd_p:dg['middle_patch'], mou_p:dg['mouth_patch']})
fpeval.extend(fcd)
del fcd
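#np.append with no axis flattens both inputs and concatenates them, so each sample below
#becomes one 1-D vector: the fusion_1 CNN embedding followed by its raw geometry features.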
for index_extend in range(ncount):
predata['X'].append(np.append(fpeval[index_extend], dg['geometry'][index_extend]))
del fpeval
predata['Y'].extend(dg['labels'])
del dg['labels'], dg['geometry'], dg['eye_patch'], dg['mouth_patch'], dg['middle_patch']
print('%d samples with %d dims.\n'%(len(predata['Y']), len(predata['X'][0])))
Apredata.append(predata)
del predata
del data10g
with open(facepatchpreprocessdatafilename, 'wb') as fout:
pickle.dump(Apredata, fout, 4)
print('File saved.')
#print(Apredata)
#exit()
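#groupdata presumably regroups the per-partition feature lists into train/test splits,
#holding out the partitions indexed by ValidID and TestID.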
data=groupdata(Apredata, ValidID, TestID)
overtimes=1
if continue_test:
overtimes=OverTimes
#nel=[7, 10, 14, 18, 21, 25, 28, 32]
#mssl=[4, 8, 10, 14, 18, 21, 27, 32]
#msll=[1, 2, 3, 5, 8, 10, 14, 18, 24, 27]#, should not exceed 5. for this subject
#loopflag=False
#log=log.replace('./logs','./logs/M%dtests'%(Module))#use for tuning
#for v_nel in nel:
# if loopflag:
# break
# if NetworkType==60 or NetworkType==61:
# loopflag=True
# for v_mss in mssl:
# for v_msl in msll:
# n_estimators=v_nel#10, estimators for random forest classifier
# min_samples_split=v_mss#10
# min_samples_leaf=v_msl#5, should not exceed 5. for this subject
#n_estimators=14#10, estimators for random forest classifier
#min_samples_split=10#10
#min_samples_leaf=5#5, should not exceed 5. for this subject
sst=SIMSTS(overtimes)
for test_run in range(overtimes):
ct=time.time()
m7_model_save_path=model_save_path.replace('_R'+str(runs)+time.strftime('_%Y%m%d%H%M%S',time.localtime(t1)),
'_R'+str(test_run)+time.strftime('_%Y%m%d%H%M%S',time.localtime(ct)))
if NetworkType%10==0:
from sklearn import tree
optm = tree.DecisionTreeClassifier(criterion='entropy', min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf)
elif NetworkType%10==1:
from sklearn import tree
optm = tree.DecisionTreeClassifier(criterion='gini', min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf)
elif NetworkType%10==2:
n_estimators=14#number of trees for the random forest classifier (previously 10)
min_samples_split=4#previously 10
min_samples_leaf=5#should not exceed 5 for this task
from sklearn.ensemble import RandomForestClassifier
optm = RandomForestClassifier(n_estimators=n_estimators, criterion='entropy',
min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf)
elif NetworkType%10==3:
n_estimators=32#number of trees for the random forest classifier (previously 10)
min_samples_split=4#previously 10
min_samples_leaf=5#should not exceed 5 for this task
from sklearn.ensemble import RandomForestClassifier
optm = RandomForestClassifier(n_estimators=n_estimators, criterion='gini',
min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf)
else:
print('ERROR:::::$$$$: Unexpected NetworkType encountered.')
exit(-1)
if test_run==0:
print('n_estimators(RFC):%d\tmin_samples_split:%d\tmin_samples_leaf:%d'%(n_estimators,
min_samples_split, min_samples_leaf))
logpostfix='_E%d_MSS%d_MSL%d_'%(n_estimators, min_samples_split, min_samples_leaf)
m7_model_save_path=m7_model_save_path.replace('.ckpt', '_%s.ckpt'%(type(optm).__name__))
optm.fit(data.train['X'], data.train['Y'])
tY=optm.predict(data.train['X'])
train_acc=metrics.accuracy_score(np.asarray(data.train['Y']), tY)
tcm=calR(tY, data.train['Y'])
toaa=overAllAccuracy(tcm)
pY=optm.predict(data.test['X'])
#print(pY.shape)
#print((np.asarray(data.test['Y'])).shape)
accuracy=metrics.accuracy_score(np.asarray(data.test['Y']), pY)
cm=calR(pY, data.test['Y'])
oaa=overAllAccuracy(cm)
tt=time.time()
print('OT:%2d\tOAA:%.8f\tAcc:%.8f\tTOAA:%.8f\tTAc:%.8f\t%s\tT:%fs'%(test_run, oaa, accuracy, toaa, train_acc, str(type(optm).__name__),(tt-ct)))
sst.addFigure(oaa)
file_record=logfileForSklearnModel(file_record,test_run, optm, accuracy, oaa, cm, facepatchpreprocessdatafilename, train_acc, toaa, tcm)
loss_a.setMinimun_loss(oaa)
modelname=m7_model_save_path.replace('.ckpt','_%s_.pkl'%(str(oaa)))
with open(modelname, 'wb') as fout:
pickle.dump(optm, fout, 4)
tt=time.time()
logf=log.replace('.txt',('_'+str(type(optm).__name__)+logpostfix+'.txt'))
filelog=open(logf,'a')
filelog.write('%s\t\t TotalTimeConsumed: %f\tOptimizer: %s\n'%(file_record, (tt-ct), str(type(optm).__name__)))
filelog.close()
state=sst.getSTS()
print('Mean:%f\tMax:%f\tMin:%f'%(state[0], state[1], state[2]))
sst.logfile(Module, DataSet, NetworkType, n_estimators, min_samples_split, min_samples_leaf)
'''MODULE7 ENDS---------------------------------------------------------------------------------------------'''
#
#
#Module 8: face-patch CNN features only
elif Module==8:
from sklearn import metrics
#from multiprocessing import pool
stcmwvlilttv=1.4054#value needs to be determined; save_the_current_model_when_validation_loss_is_less_than_this_value
'''MODULE8----------------------------------------------------------------------------------------------------
Options for the classifiers built on face-patch CNN features
-------------------------------------------------------------------------------------------------------------'''
print('Network Type: %s'%(NetworkType))
if NetworkType//10==6:#use network 6 from the face patches module and take the fusion_1 layer output
from FacePatches_NET import FacePatches_NET_3Conv_3Inception_tflearn as FPN
fpndim=9216
#m3modelname='./M7models/D502_M3_N6_T2_V2_R1_20171110055149_1.18062_.ckpt'
m3modelname='./M7models/D502_M3_N6_T2_V2_R1_20171110055149_1.18062_.ckpt'
facepatchpreprocessdatafilename='./Pre-Datasets/D%d_N%dinM3_pre-data_with_%ddims_from_%s.pkl'%(DataSet,6,fpndim,os.path.basename(m3modelname))
else:
print("Usage: python finetune.py <GPUID> <Module> <NetworkType>\nWrong NetworkType, please check the NetworkType input again.")
exit(-1)
'''Here begins the implementation logic-------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------'''
###load cached pre-computed features if available, otherwise generate them
print('Checking path:\n%s\n'%(facepatchpreprocessdatafilename))
if os.path.exists(facepatchpreprocessdatafilename):
print('Loading data from previously generated file...')
with open(facepatchpreprocessdatafilename, 'rb') as datafile:
Apredata=pickle.load(datafile)
else:
print('Generating data......')
#define appearance graph
fp_G=tf.Graph()
with fp_G.as_default():
eye_p_shape=[None, 26, 64, 1]
midd_p_shape=[None, 49, 28, 1]
mou_p_shape=[None, 30, 54, 1]
eye_p = tf.placeholder(tf.float32, eye_p_shape)
midd_p = tf.placeholder(tf.float32, midd_p_shape)
mou_p = tf.placeholder(tf.float32, mou_p_shape)
softmax=FPN(eye_p, midd_p, mou_p)
fpsaver = tf.train.Saver()
fusion1=tflearn.get_layer_by_name('fusion_1')
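#'fusion_1' names the fusion layer inside the tflearn-built network; its activations are
#extracted below and used as the learned face-patch descriptor.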
print('Facepatches graph at: \t\t', fp_G)
#exit()
#each session is bound to its own graph
fp_sess=tf.InteractiveSession(graph=fp_G)
print('\n%%%%%%%Sessions are created\n')
try:
#must initialize the variables in the graph before computation or before loading pretrained weights
fp_sess.run(tf.variables_initializer(var_list=fp_G.get_collection(name='variables')))
print('\nFace Patches network variables initialized.')
#the fpsaver must be defined in the graph of its owner session, or errors will occur during restoration or saving
fpsaver.restore(sess=fp_sess, save_path=m3modelname)
print('Face Patches Network Model loaded')
except:
print('Unable to load the pretrained network for the face patches net')
traceback.print_exc()
exit()
data10g=DataSetPrepare.loadPKLDataWithPartitions_v4(Dataset_Dictionary.get(DataSet), Patches=True, cn=cn)
Apredata=[]
print('Data contains %d groups.'%len(data10g))
for dg in data10g:
predata={'X':[], 'Y':[]}
fpeval=[]
ncount=len(dg['labels'])
print('Processing data with %d samples.'%(ncount))
#print(dg['eye_patch'][0].shape)
if ncount>TestNumLimit:
iters=np.floor_divide(ncount, test_bat)
print(iters)
for ite in range(iters):
#print(ite)
start=test_bat*ite
end=test_bat*(ite+1)
fcd=fusion1.eval(feed_dict={eye_p:dg['eye_patch'][start:end], midd_p:dg['middle_patch'][start:end],
mou_p:dg['mouth_patch'][start:end]})
fpeval.extend(fcd)
del fcd
if ncount%test_bat>0:
fcd=fusion1.eval(feed_dict={eye_p:dg['eye_patch'][test_bat*iters:ncount], midd_p:dg['middle_patch'][test_bat*iters:ncount],
mou_p:dg['mouth_patch'][test_bat*iters:ncount]})
fpeval.extend(fcd)
del fcd
else:
fcd=fusion1.eval(feed_dict={eye_p:dg['eye_patch'], midd_p:dg['middle_patch'], mou_p:dg['mouth_patch']})
fpeval.extend(fcd)
del fcd
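#Unlike Module 7, only the CNN embedding is kept here and no geometry features are appended,
#hence fpndim=9216 instead of Module 7's 9526 (9216 plus the 310-dim geometry vector).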
for index_extend in range(ncount):
#predata['X'].append(np.append(fpeval[index_extend], dg['geometry'][index_extend]))
predata['X'].append(np.asarray(fpeval[index_extend]))
del fpeval
predata['Y'].extend(dg['labels'])
del dg['labels'], dg['eye_patch'], dg['mouth_patch'], dg['middle_patch']
print('%d samples with %d dims.\n'%(len(predata['Y']), len(predata['X'][0])))
Apredata.append(predata)
del predata
del data10g
with open(facepatchpreprocessdatafilename, 'wb') as fout:
pickle.dump(Apredata, fout, 4)
print('File saved.')
#print(Apredata)
#exit()
data=groupdata(Apredata, ValidID, TestID)
overtimes=1
if continue_test:
overtimes=OverTimes
#nel=[7, 10, 14, 18, 21, 25, 28, 32]
#mssl=[2, 4, 8, 10, 14, 18, 21, 27, 32]
#msll=[1, 2, 3, 5, 8, 10]
##nel=[10, 14, 18, 21, 25, 28, 32]
##mssl=[10, 14, 18, 21, 27, 32]
##msll=[3]
#loopflag=False
#log=log.replace('./logs','./logs/M%dtests/details'%(Module))#use for tuning
#for v_nel in nel:
# if loopflag:
# break
# if NetworkType==60 or NetworkType==61:
# loopflag=True
# for v_mss in mssl:
# for v_msl in msll:
#n_estimators=v_nel#10, estimators for random forest classifier
#min_samples_split=v_mss#10
#min_samples_leaf=v_msl#5
sst=SIMSTS(overtimes)
for test_run in range(overtimes):
ct=time.time()
m8_model_save_path=model_save_path.replace('_R'+str(runs)+time.strftime('_%Y%m%d%H%M%S',time.localtime(t1)),
'_R'+str(test_run)+time.strftime('_%Y%m%d%H%M%S',time.localtime(ct)))
if NetworkType%10==0:
from sklearn import tree
optm = tree.DecisionTreeClassifier(criterion='entropy', min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf)
elif NetworkType%10==1:
from sklearn import tree
optm = tree.DecisionTreeClassifier(criterion='gini', min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf)
elif NetworkType%10==2:
n_estimators=49#number of trees for the random forest classifier (previously 10)
min_samples_split=5#previously 10
min_samples_leaf=3#previously 5
max_depth=50
oob_score=True
from sklearn.ensemble import RandomForestClassifier
optm = RandomForestClassifier(n_estimators=n_estimators, criterion='entropy',
min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf,
max_depth=max_depth, oob_score=oob_score)
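#oob_score=True asks sklearn to estimate generalization accuracy from out-of-bag samples;
#max_depth caps tree depth, which matters on these high-dimensional (9216-d) features.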
elif NetworkType%10==3:
n_estimators=21#number of trees for the random forest classifier (previously 10)
min_samples_split=4#previously 10
min_samples_leaf=2#previously 5
from sklearn.ensemble import RandomForestClassifier
optm = RandomForestClassifier(n_estimators=n_estimators, criterion='gini',
min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf)
else:
print('ERROR:::::$$$$: Unexpected NetworkType encountered.')
exit(-1)
logpostfix='_E%d_MSS%d_MSL%d_'%(n_estimators, min_samples_split, min_samples_leaf)
if test_run==0:
print('n_estimators(RFC):%d\tmin_samples_split:%d\tmin_samples_leaf:%d'%(n_estimators,
min_samples_split, min_samples_leaf))
m8_model_save_path=m8_model_save_path.replace('.ckpt', '_%s.ckpt'%(type(optm).__name__))
optm.fit(data.train['X'], data.train['Y'])
tY=optm.predict(data.train['X'])
train_acc=metrics.accuracy_score(np.asarray(data.train['Y']), tY)
tcm=calR(tY, data.train['Y'])
toaa=overAllAccuracy(tcm)
pY=optm.predict(data.test['X'])
#print(pY.shape)
#print((np.asarray(data.test['Y'])).shape)
accuracy=metrics.accuracy_score(np.asarray(data.test['Y']), pY)
cm=calR(pY, data.test['Y'])
oaa=overAllAccuracy(cm)
tt=time.time()
print('OT:%2d\tOAA:%.8f\tAcc:%.8f\tTOAA:%.8f\tTAc:%.8f\t%s\tT:%fs'%(test_run, oaa, accuracy, toaa, train_acc, str(type(optm).__name__),(tt-ct)))
sst.addFigure(oaa)
file_record=logfileForSklearnModel(file_record,test_run, optm, accuracy, oaa, cm, facepatchpreprocessdatafilename, train_acc, toaa, tcm)
loss_a.setMinimun_loss(oaa)
modelname=m8_model_save_path.replace('.ckpt','_%s_.pkl'%(str(oaa)))
with open(modelname, 'wb') as fout:
pickle.dump(optm, fout, 4)
tt=time.time()
logf=log.replace('.txt',('_'+str(type(optm).__name__)+logpostfix+'.txt'))
filelog=open(logf,'a')
filelog.write('%s\t\t TotalTimeConsumed: %f\tOptimizer: %s\n'%(file_record, (tt-ct), str(type(optm).__name__)))
filelog.close()
'''n_estimators=10#10, estimators for random forest classifier
min_samples_split=18#10
min_samples_leaf=10#5
#freeze_support()
pool_processes=pool.Pool(processes=8)
apply_result_list=[]
for test_run in range(overtimes):
apply_result_list.append(pool_processes.apply_async(multiprocessingUnitForModule8tests,
(metrics, pickle, sst, model_save_path, runs, t1, test_run,
NetworkType, data,facepatchpreprocessdatafilename, log,
n_estimators, min_samples_split, min_samples_leaf,)))
pool_processes.close()
pool_processes.join()
for v in apply_result_list:
sst.addFigure(v.get())'''
state=sst.getSTS()
print('Mean:%f\tMax:%f\tMin:%f'%(state[0], state[1], state[2]))
sst.logfile(Module, DataSet, NetworkType, n_estimators, min_samples_split, min_samples_leaf)
'''MODULE8 ENDS---------------------------------------------------------------------------------------------'''
if Module!=7 and Module!=8:
#newmodelname=model_save_path.split('.ckpt')[0]+'_'+str(loss_a.minimun_loss)+'_.ckpt'
newmodelname=model_save_path.replace('.ckpt','_%s_.ckpt'%(str(loss_a.minimun_loss)))
if os.path.exists(model_save_path+'.data-00000-of-00001'):
os.rename((model_save_path+'.data-00000-of-00001'),(newmodelname+'.data-00000-of-00001'))
os.rename((model_save_path+'.index'),(newmodelname+'.index'))
os.rename((model_save_path+'.meta'),(newmodelname+'.meta'))
tt=time.time()
log=log.replace('.txt',('_'+str(type(optm).__name__)+'.txt'))
filelog=open(log,'a')
filelog.write('%s\t\t TotalTimeConsumed: %f\tOptimizer: %s\n'%(file_record, (tt-t1), str(type(optm).__name__)))
filelog.close()
print(log)
print(log.split('.txt')[0])
losslog=log.split('.txt')[0]+'_Runs%d_%d_%d'%(runs, ValidID, TestID)+'.validationlosslist'
losslog=losslog.replace('./logs/','./logs/VL/')
loss_a.outputlosslist(losslog)
except:
try:
if Module!=7 and Module!=8:
tt=time.time()
log=log.replace('.txt',('_'+str(type(optm).__name__)+'.txt'))
filelog=open(log,'a')
filelog.write('%s\t\t TotalTimeConsumed: %f\tOptimizer: %s\n'%(file_record, (tt-t1), str(type(optm).__name__)))
filelog.close()
print('\n\n>>>>>> Saving current run info after the run crashed or was interrupted.\n\n')
print(log)
print(log.split('.txt')[0])
losslog=log.split('.txt')[0]+'_Runs%d_%d_%d'%(runs, ValidID, TestID)+'.validationlosslist'
losslog=losslog.replace('./logs/','./logs/VL/')
loss_a.outputlosslist(losslog)
print('>>>>>> Current run info has been saved after the crash or interruption.\n\n')
except:
print('ERROR: Failed to save current run info after the crash.')
ferror=open(errorlog,'w')
traceback.print_exc()
traceback.print_exc(file=ferror)
ferror.close()
def second_save(model_save_path, model_save_path_second):
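#Rotate TensorFlow checkpoint files: demote the current best checkpoint (its .data-00000-of-00001,
#.index and .meta files) to the *_second name before a new best is saved over model_save_path.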
if os.path.exists(model_save_path+'.data-00000-of-00001'):
if os.path.exists(model_save_path_second+'.data-00000-of-00001'):
os.remove(model_save_path_second+'.data-00000-of-00001')
os.remove(model_save_path_second+'.index')
os.remove(model_save_path_second+'.meta')
os.rename((model_save_path+'.data-00000-of-00001'),(model_save_path_second+'.data-00000-of-00001'))
os.rename((model_save_path+'.index'),(model_save_path_second+'.index'))
os.rename((model_save_path+'.meta'),(model_save_path_second+'.meta'))
return True
def runWithTestPKL(GPU_Device_ID, Module,
DataSet, PKLList,
NetworkType, runs,
cLR=0.0001, batchSize=15, loadONW=False, reshape=False):
try:
initialize_dirs()
'''GPU Option---------------------------------------------------------------------------------------------
Determine which GPU is going to be used
------------------------------------------------------------------------------------------------------------'''
print('GPU Option: %s'%(GPU_Device_ID))
if (0==GPU_Device_ID) or (1==GPU_Device_ID):
os.environ["CUDA_VISIBLE_DEVICES"]=str(GPU_Device_ID)
errorlog='./logs/errors_gpu'+str(GPU_Device_ID)+'.txt'
templog='./logs/templogs_newSC_gpu'+str(GPU_Device_ID)+'_M'+str(Module)+'_D'+str(DataSet)+'.txt'
else:
print("Usage: python finetune.py <GPUID> <Module> <NetworkType>\nGPUID must be 0 or 1\nModule must be 1, 2, or 3\nNetworkType must be 0, 1, 2, 3")
exit(-1)
'''GPU Option ENDS---------------------------------------------------------------------------------------'''
cn=7#number of expression categories
if int(DataSet)>60000:
cn=6
lrstep=6000
mini_loss=10000
file_record=None
t1=time.time()
logprefix='./logs/'
model_save_path=''
labelshape=[None, 7]
m1shape= [None, 128, 128, 1]
if DataSet>500:
m2d=310
else:
m2d=122
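#m2d is the geometry feature dimensionality: datasets with ID > 500 carry 310-dim vectors,
#older ones 122; a few datasets below override it to 258.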
global Mini_Epochs
global show_threshold
#
#
#
'''Input Data-------------------------------------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------'''
#
##data set loading
#
D_f=False
if Module==2 and NetworkType<3:
D_f=True
dfile=Dataset_Dictionary.get(DataSet, False)
if dfile==False:
print('\nERROR: Unexpected DatasetID %d encountered.\n\n'%(int(DataSet)))
exit(-1)
train_data = DataSetPrepare.loadPKLData_v4(dfile, Module, Df=D_f,reshape=reshape, cn=cn)
PKLList=PKLList.split(',')
print('Data to be tested: ', PKLList)
testIDstr=''
loss_a=[]
test_data_list=[]
laflag=[]
pkl_test_num=len(PKLList)
for v in PKLList:
if Dataset_Dictionary.get(int(v), False)==False:
print('\nWARNING: Unexpected DatasetID %d encountered.\n\n'%(int(v)))
continue
testIDstr=testIDstr+'D'+str(v)
loss_a.append(LOSS_ANA())
test_data_list.append(DataSetPrepare.loadPKLData_v4(Dataset_Dictionary.get(int(v)), Module, Df=D_f, reshape=reshape, cn=cn))
laflag.append(False)
if DataSet==2:
logprefix="./logs/D2CKplus_newrescalemetric_8groups_gpu"
print("Processing 8 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==3:
logprefix="./logs/D3CKpluslogbslr_weberface_8groups_gpu"
print("Processing 8 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==4:
logprefix="./logs/D4CKpluslogbslr_weberReverse_8groups_gpu"
print("Processing 8 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==5:
logprefix="./logs/D5CKpluslogbslr_weberface25up_8groups_gpu"
print("Processing 8 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==6:
m2d=258
logprefix="./logs/D6CKplus_GeoFeatureV2_8groups_gpu"
print("Processing 8 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==7:
logprefix="./logs/D7CKpluslogbslr_weberface_innerface48x36_8groups_gpu"
print("Processing 8 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==8:
logprefix="./logs/D8CKpluslogbslr_ELTFS_8groups_gpu"
print("Processing 8 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==9:
m1shape= [None, 224, 224, 1]
logprefix="./logs/D9CKpluslogbslr_weberface224_8groups_gpu"
print("Processing 8 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==10:
logprefix="./logs/D10CKpluslogbslr_weberface_10groups_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==11:
m1shape= [None, 224, 224, 1]
logprefix="./logs/D11CKpluslogbslr_weberface224_10groups_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==12:
logprefix="./logs/D12CKpluslogbslr_ELTFS_10groups_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==13:
m2d=258
logprefix="./logs/D13_CKplus_8G_V4_Geo258_ELTFS128x128_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==15:
logprefix="./logs/D15_CKPLUS_10G_EnlargebyWEF_testonoriginal_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==16:
if runs%2==0:
batchSize=30
else:
batchSize=15
logprefix="./logs/D16_CKPLUS_10G_Enlargeby2015CCV_10T_testonoriginal_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==17:
logprefix="./logs/D17_CKplus_10G_V4_weberface128x128_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==18:
logprefix="./logs/D18_CKplus_10G_V5_formalized_weberface128x128_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==19:
logprefix="./logs/D19_CKplus_10G_V4_ELTFS128x128_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==33:
batchSize=35
logprefix="./logs/D33_KDEF_weberface_10groups_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==32:
batchSize=70
logprefix="./logs/D32_KDEF_10G_EnlargebyWEF_testonoriginal_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==34:
batchSize=70
logprefix="./logs/D34_KDEF_10G_Enlargeby2015CCV_10T_testonoriginal_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==42:
batchSize=60
logprefix="./logs/D42_JAFFE_10G_Enlargeby_WEF_testonoriginaldataset_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==40:
logprefix="./logs/D40_JAFFE_10G_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==43:
batchSize=60
logprefix="./logs/D43_JAFFE_10G_Enlargeby2015CCV_10T_testonoriginaldataset_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==111:
batchSize=30
logprefix="./logs/D111_MergeDataset_D10_D33_D40_10G_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==222:
batchSize=30
logprefix="./logs/D222_MergeDataset_D16_D34_D43_10G_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==333:
batchSize=30
logprefix="./logs/D333_MergeDataset_D16_D34_10G_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==444:
batchSize=30
logprefix="./logs/D444_MergeDataset_D10_D33_10G_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==501:
if runs%2==0:
batchSize=30
else:
batchSize=15
logprefix="./logs/D501_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==502:
if runs%2==0:
batchSize=30
else:
batchSize=15
logprefix="./logs/D502_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==531:
if runs%2==0:
batchSize=15
else:
batchSize=30
logprefix="./logs/D531_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==532:
if runs%2==0:
batchSize=15
else:
batchSize=30
logprefix="./logs/D532_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==551:
if runs%2==0:
batchSize=21
else:
batchSize=15
logprefix="./logs/D551_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==552:
if runs%2==0:
batchSize=21
else:
batchSize=15
logprefix="./logs/D552_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==553:
if runs%2==0:
batchSize=21
else:
batchSize=15
logprefix="./logs/D553_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==554:
if runs%2==0:
batchSize=21
else:
batchSize=15
logprefix="./logs/D554_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==600:
if runs%2==0:
batchSize=35
else:
batchSize=70
cLR=0.00001
logprefix="./logs/D600_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==601:
if runs%2==0:
batchSize=35
else:
batchSize=70
cLR=0.00001
logprefix="./logs/D601_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==610:
if runs%3==0:
batchSize=35
elif runs%3==1:
batchSize=70
else:
batchSize=128
Mini_Epochs=Mini_Epochs*2
cLR=0.00001
logprefix="./logs/D610_gpu"
print("Processing dataset>>>>>>>>\n%s"%(logprefix))
elif DataSet==611:
if runs%3==0:
batchSize=35
elif runs%3==1:
batchSize=70
else:
batchSize=128
Mini_Epochs=Mini_Epochs*2
cLR=0.00001
logprefix="./logs/D611_gpu"
print("Processing dataset>>>>>>>>\n%s"%(logprefix))
elif DataSet==620:
if runs%3==0:
batchSize=35
elif runs%3==1:
batchSize=70
else:
batchSize=128
Mini_Epochs=Mini_Epochs*2
cLR=0.00001
logprefix="./logs/D620_gpu"
print("Processing dataset>>>>>>>>\n%s"%(logprefix))
elif DataSet==621:
if runs%3==0:
batchSize=35
elif runs%3==1:
batchSize=70
else:
batchSize=128
Mini_Epochs=Mini_Epochs*2
cLR=0.00001
logprefix="./logs/D621_gpu"
print("Processing dataset>>>>>>>>\n%s"%(logprefix))
elif DataSet==1001:
if runs%2==0:
batchSize=30
else:
batchSize=15
logprefix="./logs/D1001_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
elif DataSet==1002:
if runs%2==0:
batchSize=30
else:
batchSize=15
logprefix="./logs/D1002_gpu"
print("Processing 10 groups>>>>>>>>\n%s"%(logprefix))
else:
print('ERROR: Unexpected Dataset ID')
exit()
#
#
#
tt=time.time()
if reshape:
logprefix=logprefix+'_reshape64x64'
if Module==6:
log=logprefix+str(GPU_Device_ID)+"_M"+str(Module)+"_D"+str(DataSet)+"_N"+str(NetworkType)+'_FullDataForTrainingSubjectTo_'+testIDstr+"_newStopCriteriaV3.txt"
elif loadONW:
log=logprefix+str(GPU_Device_ID)+"_M"+str(Module)+"_D"+str(DataSet)+"_N"+str(NetworkType)+'_FullDataForTrainingSubjectTo_'+testIDstr+"_withPretrainModelWeight_newStopCriteriaV3.txt"
else:
log=logprefix+str(GPU_Device_ID)+"_M"+str(Module)+"_D"+str(DataSet)+"_N"+str(NetworkType)+'_FullDataForTrainingSubjectTo_'+testIDstr+"_noPretrain_newStopCriteriaV3.txt"
#logfilename=time.strftime('%Y%m%d%H%M%S',time.localtime(tt))+str(sys.argv[2:4])
print('Time used for loading data: %fs'%(tt-t1))
if os.path.exists("J:/Models/saves/"):
model_save_path=("J:/Models/saves/"+'M'+str(Module)+'/D'+str(DataSet)+'/N'+str(NetworkType)+'/')
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
model_save_path=(model_save_path+'D'+str(DataSet)+'_M'+str(Module)+'_N'+str(NetworkType)+'_FullDataForTrainingSubjectTo_'+testIDstr+'_R'
+str(runs)+time.strftime('_%Y%m%d%H%M%S',time.localtime(t1))+".ckpt")
else:
model_save_path=("./saves/"+'M'+str(Module)+'/D'+str(DataSet)+'/N'+str(NetworkType)+'/')
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
model_save_path=(model_save_path+'D'+str(DataSet)+'_M'+str(Module)+'_N'+str(NetworkType)+'_FullDataForTrainingSubjectTo_'+testIDstr+'_R'
+str(runs)+time.strftime('_%Y%m%d%H%M%S',time.localtime(t1))+".ckpt")
model_save_path_second=model_save_path.replace('.ckpt','_second.ckpt')
'''Input Data Ends-----------------------------------------------------------------------------------------'''
#
#
#
if reshape:
m1shape=[None, 64, 64, 1]
print('Module 1 images input shape has been set to %s'%str(m1shape))
model_save_path=model_save_path.replace('.ckpt','_reshape.ckpt')
#
#
#
global_step = tf.Variable(0, trainable=False)
lr=tf.train.exponential_decay(cLR, global_step, lrstep, lr_drate, staircase=True)
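#with staircase=True the learning rate drops by a factor of lr_drate every lrstep global steps,
#i.e. lr = cLR * lr_drate**(global_step//lrstep), matching the clr values printed during training.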
if Module==1:
stcmwvlilttv_for_mutilTest=1.4674#save_the_current_model_when_validation_loss_is_less_than_this_value
if DataSet==554 or DataSet==551 or DataSet==552 or DataSet==553:
stcmwvlilttv_for_mutilTest=1.7
elif DataSet==610 or DataSet==611:
stcmwvlilttv_for_mutilTest=1.70
'''MODULE1----------------------------------------------------------------------------------------------------
Options for the whole-face-network
Only need to select one of the import options as the network for the whole face feature extraction.
-------------------------------------------------------------------------------------------------------------'''
print('Network Type: %s'%(NetworkType))
if NetworkType==0:
from VGG_NET import VGG_NET_20l_512o as WFN
elif NetworkType==1:
from VGG_NET import VGG_NET_20l_128o as WFN
elif NetworkType==2:
from VGG_NET import VGG_NET_16l_128o as WFN
elif NetworkType==3:
from VGG_NET import VGG_NET_16l_72o as WFN
elif NetworkType==4:
from VGG_NET import VGG_NET_o as WFN
elif NetworkType==8:
from VGG_NET import VGG_NET_Inception1 as WFN
elif NetworkType==9:
from VGG_NET import VGG_NET_Inception2 as WFN
elif NetworkType==10:
from VGG_NET import VGG_NET_O_tfl as WFN
elif NetworkType==11:
from VGG_NET import VGG_NET_I5 as WFN
elif NetworkType==12:
from VGG_NET import VGG_NET_I5_ELU as WFN
else:
print("Usage: python finetune.py <GPUID> <Module> <NetworkType>\nWith Module 1, NetworkType must be 0, 1, 2, 3")
exit(-1)
'''Here begins the implementation logic-------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------'''
#Holder for gray images with m1shape in a batch size of batch_size
images = tf.placeholder(tf.float32, m1shape)
#Holder for labels in a batch of batch_size; the number of labels is determined by labelshape
labels = tf.placeholder(tf.float32, labelshape)
if NetworkType==10 or NetworkType==11 or NetworkType==12:
Mini_Epochs = 60
softmax=WFN(images)
else:
whole_face_net = WFN({'data':images})
softmax=whole_face_net.layers['prob']
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=softmax),0)
#optm=tf.train.RMSPropOptimizer(lr)
optm=tf.train.AdamOptimizer(lr)
#print(optm.get_name())
#print(type(optm).__name__)
#exit()
train_op=optm.minimize(loss,global_step=global_step)#for train
#for test
correct_prediction = tf.equal(tf.argmax(softmax,1),tf.argmax(labels,1))
test_cast=tf.cast(correct_prediction, "float")
sum_test=tf.reduce_sum(test_cast)#for large test set
accuracy = tf.reduce_mean(test_cast)#for small test set
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if loadONW:
if NetworkType==10 or NetworkType==11 or NetworkType==12:
restorevggModel(sess, NetworkType, tf.get_default_graph())
else:
loadPretrainedModel(NetworkType, whole_face_net, sess,Module)
print('Model has been restored.\n')
saver = tf.train.Saver()
iters=int((train_data.num_examples*Mini_Epochs)/batchSize)+1
for i in range(iters):
afc=[]
batch=train_data.next_batch(batchSize, shuffle=False)
tloss, _=sess.run([loss, train_op], feed_dict={images:batch[0], labels:batch[5]})
if tloss<mini_loss:
mini_loss=tloss
if tloss > show_threshold:
clr=cLR*(lr_drate)**(i//lrstep)
tt=time.time()
print("CLR:%.8f Ite:%06d Bs:%03d Epo:%03d Los:%.8f mLo:%08f T:%fs"%
(clr,i,batchSize,train_data.epochs_completed, tloss, mini_loss, (tt-t1)))
else:
V_string='VALID>>'
cm_string='ConfusionMatrix>> '
for pkl_i in range(pkl_test_num):
afc=[]
v_accuracy, valid_loss, oaa, confu_mat = Valid_on_TestSet(cn, sess, accuracy, sum_test, loss, softmax,
images, test_data_list[pkl_i].res_images, labels, test_data_list[pkl_i].labels,afc=afc)
laflag[pkl_i] = loss_a[pkl_i].analyzeLossVariation(valid_loss)
V_string=V_string+'D%d OAA:%f VA:%f %s mVL:%.8f VL:%.8f '%(int(PKLList[pkl_i]), oaa, v_accuracy, str(afc), loss_a[pkl_i].minimun_loss, valid_loss)
cm_string=cm_string+'D%d:'%(int(PKLList[pkl_i]))+str(confu_mat)+' '
clr=cLR*(lr_drate)**(i//lrstep)
tt=time.time()
print("CLR:%.8f Ite:%06d Bs:%03d Epo:%03d Los:%.8f mLo:%08f %s T:%fs"%
(clr,i,batchSize,train_data.epochs_completed, tloss, mini_loss, V_string, (tt-t1)))
if laflag[0]:
file_record = logfileV2(file_record, runs=runs, V_string=V_string,
final_train_loss=tloss, train_min_loss=mini_loss, TC=(tt-t1), ILR=cLR, FLR=clr, LS=lrstep, ites=i,
Epo=train_data.epochs_completed, cBS=batchSize, iBS=batchSize,
input=sys.argv, CMstring=cm_string, T=time.localtime(tt), df=dfile)
if loss_a[0].minimun_loss < stcmwvlilttv_for_mutilTest:
second_save(model_save_path, model_save_path_second)
saver.save(sess=sess, save_path=model_save_path)
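#second_save keeps the runner-up checkpoint: the previous best files are renamed to the
#*_second path just before the new best overwrites model_save_path.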
'''MODULE1 ENDS---------------------------------------------------------------------------------------------'''
#
#
#
elif Module==2:
show_threshold = 1.75
Mini_Epochs = 100
if DataSet==601:
if runs%2==0:
batchSize = 35
else:
batchSize = 70
stcmwvlilttv_for_mutilTest=1.3854#value needs to be determined; save_the_current_model_when_validation_loss_is_less_than_this_value
'''MODULE2----------------------------------------------------------------------------------------------------
Options for the Geometry-network
Only need to select one of the import options as the network for the geometry feature extraction.
-------------------------------------------------------------------------------------------------------------'''
print('Geometry Network Type: %s'%(NetworkType))
if NetworkType==0:
from Geometric_NET import Geometric_NET_2c2l as GeN
elif NetworkType==1:
from Geometric_NET import Geometric_NET_2c2lcc1 as GeN
elif NetworkType==2:
from Geometric_NET import Geometric_NET_2c2lcc1l1 as GeN
elif NetworkType==3:
from Geometric_NET import Geometric_NET_1h as GeN
elif NetworkType==4:
from Geometric_NET import Geometric_NET_2h1I as GeN
elif NetworkType==5:
from Geometric_NET import Geometric_NET_3h1I as GeN
clr=0.00001
learningRate=0.00001
elif NetworkType==6:
from Geometric_NET import Geometric_NET_h1I as GeN
else:
print("Usage: python finetune.py <GPUID> <Module> <NetworkType>\nWith Module 2, NetworkType must be 0, 1, 2")
exit(-1)
'''Here begins the implementation logic-------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------'''
#Holder for geometry features (m2d dims) in a batch of batch_size
if D_f:
geo_features = tf.placeholder(tf.float32, [None, m2d, 1])
else:
geo_features = tf.placeholder(tf.float32, [None, m2d])
#Holder for labels in a batch of batch_size; the number of labels is determined by labelshape
labels = tf.placeholder(tf.float32, labelshape)
Geometry_net = GeN({'data':geo_features})
print(type(Geometry_net))
softmax=Geometry_net.layers['geprob']
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=softmax),0)
#optm=tf.train.RMSPropOptimizer(lr)
optm = tf.train.AdamOptimizer(lr)
#optm=tf.train.RMSPropOptimizer(lr)
train_op=optm.minimize(loss, global_step=global_step)#for train
#for test
correct_prediction = tf.equal(tf.argmax(softmax,1),tf.argmax(labels,1))
test_cast=tf.cast(correct_prediction, "float")
sum_test=tf.reduce_sum(test_cast)#for large test set
accuracy = tf.reduce_mean(test_cast)#for small test set
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
iters=int((train_data.num_examples*Mini_Epochs)/batchSize)+1
for i in range(iters):
batch=train_data.next_batch(batchSize, shuffle=False)
tloss, _=sess.run([loss, train_op], feed_dict={geo_features:batch[1], labels:batch[5]})
if tloss<mini_loss:
mini_loss=tloss
if tloss > show_threshold:
clr=cLR*(lr_drate)**(i//lrstep)
tt=time.time()
print("CLR:%.8f Ite:%06d Bs:%03d Epo:%03d Los:%.8f mLo:%08f T:%fs"%
(clr,i,batchSize,train_data.epochs_completed, tloss, mini_loss, (tt-t1)))
else:
V_string='VALID>>'
cm_string='ConfusionMatrix>> '
for pkl_i in range(pkl_test_num):
v_accuracy, valid_loss, oaa, confu_mat = Valid_on_TestSet(cn, sess, accuracy, sum_test, loss, softmax,
geo_features, test_data_list[pkl_i].geometry, labels, test_data_list[pkl_i].labels)
laflag[pkl_i] = loss_a[pkl_i].analyzeLossVariation(valid_loss)
V_string=V_string+'D%d OAA:%f VA:%f mVL:%.8f VL:%.8f '%(int(PKLList[pkl_i]), oaa, v_accuracy, loss_a[pkl_i].minimun_loss, valid_loss)
cm_string=cm_string+'D%d:'%(int(PKLList[pkl_i]))+str(confu_mat)+' '
clr=cLR*(lr_drate)**(i//lrstep)
tt=time.time()
print("CLR:%.8f Ite:%06d Bs:%03d Epo:%03d Los:%.8f mLo:%08f %s T:%fs"%
(clr,i,batchSize,train_data.epochs_completed, tloss, mini_loss, V_string, (tt-t1)))
if laflag[0]:
file_record = logfileV2(file_record, runs=runs, V_string=V_string,
final_train_loss=tloss, train_min_loss=mini_loss, TC=(tt-t1), ILR=cLR, FLR=clr, LS=lrstep, ites=i,
Epo=train_data.epochs_completed, cBS=batchSize, iBS=batchSize,
input=sys.argv, CMstring=cm_string, T=time.localtime(tt), df=dfile)
if loss_a[0].minimun_loss < stcmwvlilttv_for_mutilTest:
second_save(model_save_path, model_save_path_second)
saver.save(sess=sess, save_path=model_save_path)
'''MODULE2 ENDS---------------------------------------------------------------------------------------------'''
#
#
#
elif Module==3:
stcmwvlilttv_for_mutilTest=1.4154#value needs to be determined; save_the_current_model_when_validation_loss_is_less_than_this_value
if DataSet==502 or DataSet==501:
stcmwvlilttv_for_mutilTest=1.4854
elif DataSet==532 or DataSet==531:
stcmwvlilttv_for_mutilTest=1.4004
elif DataSet==554 or DataSet==551 or DataSet==552 or DataSet==553:
stcmwvlilttv_for_mutilTest=1.7
elif DataSet==610 or DataSet==611:
stcmwvlilttv_for_mutilTest=1.7
'''MODULE3----------------------------------------------------------------------------------------------------
Options for the face_patches-network
-------------------------------------------------------------------------------------------------------------'''
print('FacePatch Network Type: %s'%(NetworkType))
if NetworkType==0:
from FacePatches_NET import FacePatches_NET_2Inceptions as PaN
elif NetworkType==1:
from FacePatches_NET import FacePatches_NET_2Inceptions_4lrn as PaN
elif NetworkType==2:
from FacePatches_NET import FacePatches_NET_2Inceptions_4lrn2 as PaN
elif NetworkType==3:
from FacePatches_NET import FacePatches_NET_3Conv_2Inception as PaN
elif NetworkType==4:
#from FacePatches_NET import FacePatches_NET_3Conv_1Inception as PaN
from FacePatches_NET import FacePatches_NET_3Conv_IInception_tflear as PaN
elif NetworkType==5:
from FacePatches_NET import FacePatches_NET_3Conv_2Inception_tflearn as PaN
elif NetworkType==6:
from FacePatches_NET import FacePatches_NET_3Conv_3Inception_tflearn as PaN
elif NetworkType==7:
from FacePatches_NET import FacePatches_NET_3Conv_3Inception_tflearn_ELU as PaN
elif NetworkType==24:
from FacePatches_NET import FacePatches_NET_3C_1I_2P as PaN
elif NetworkType==25:
from FacePatches_NET import FacePatches_NET_3C_2I_2P as PaN
elif NetworkType==26:
from FacePatches_NET import FacePatches_NET_3C_3I_2P as PaN
else:
print("Usage: python finetune.py <GPUID> <Module> <NetworkType>\nWith Module 2, NetworkType must be 0, 1")
exit(-1)
'''Here begins the implementation logic-------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------'''
#Holders for gray images
eye_p_shape=[None, 26, 64, 1]
midd_p_shape=[None, 49, 28, 1]
mou_p_shape=[None, 30, 54, 1]
eye_p = tf.placeholder(tf.float32, eye_p_shape)
midd_p = tf.placeholder(tf.float32, midd_p_shape)
mou_p = tf.placeholder(tf.float32, mou_p_shape)
#Holder for labels in a batch of batch_size; the number of labels is determined by labelshape
labels = tf.placeholder(tf.float32, labelshape)
#FacePatch_net = PaN({'eyePatch_data':eye_p, 'middlePatch_data':midd_p, 'mouthPatch_data':mou_p})
#print(type(FacePatch_net))
#softmax=FacePatch_net.layers['prob']
if NetworkType > 3 and NetworkType < 8:### NetworkTypes 4-7 take the placeholders directly (tflearn-style)
softmax=PaN(eye_p, midd_p, mou_p)
elif NetworkType >23 and NetworkType <27:###using only eye patch and mouth patch
softmax=PaN(eye_p, mou_p)
else:
FacePatch_net = PaN({'eyePatch_data':eye_p, 'middlePatch_data':midd_p, 'mouthPatch_data':mou_p})
print(type(FacePatch_net))
softmax=FacePatch_net.layers['prob']
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=softmax),0)
#optm=tf.train.RMSPropOptimizer(lr)
optm=tf.train.AdamOptimizer(lr)
train_op=optm.minimize(loss, global_step)#for train
#for test
correct_prediction = tf.equal(tf.argmax(softmax,1),tf.argmax(labels,1))
test_cast=tf.cast(correct_prediction, "float")
sum_test=tf.reduce_sum(test_cast)#for large test set
accuracy = tf.reduce_mean(test_cast)#for small test set
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if loadONW:
restorefacepatchModel(DataSet, sess, NetworkType, tf.get_default_graph())
saver = tf.train.Saver()
iters=int((train_data.num_examples*Mini_Epochs)/batchSize)+1
for i in range(iters):
batch=train_data.next_batch(batchSize, shuffle=False)
tloss, _=sess.run([loss, train_op], feed_dict={eye_p:batch[2], midd_p:batch[3], mou_p:batch[4], labels:batch[5]})
if tloss<mini_loss:
mini_loss=tloss
if tloss > show_threshold:
clr=cLR*(lr_drate)**(i//lrstep)
tt=time.time()
print("CLR:%.8f Ite:%06d Bs:%03d Epo:%03d Los:%.8f mLo:%08f T:%fs"%
(clr,i,batchSize,train_data.epochs_completed, tloss, mini_loss, (tt-t1)))
else:
V_string='VALID>>'
cm_string='ConfusionMatrix>> '
for pkl_i in range(pkl_test_num):
afc=[]
v_accuracy, valid_loss, oaa, confu_mat = Valid_on_TestSet_3NI(cn, sess, accuracy, sum_test, loss, softmax,
eye_p, test_data_list[pkl_i].eyep, midd_p, test_data_list[pkl_i].middlep,
mou_p, test_data_list[pkl_i].mouthp, labels, test_data_list[pkl_i].labels,afc=afc)
laflag[pkl_i] = loss_a[pkl_i].analyzeLossVariation(valid_loss)
V_string=V_string+'D%d OAA:%f VA:%f %s mVL:%.8f VL:%.8f '%(int(PKLList[pkl_i]), oaa, v_accuracy, str(afc), loss_a[pkl_i].minimun_loss, valid_loss)
cm_string=cm_string+'D%d:'%(int(PKLList[pkl_i]))+str(confu_mat)+' '
clr=cLR*(lr_drate)**(i//lrstep)
tt=time.time()
print("CLR:%.8f Ite:%06d Bs:%03d Epo:%03d Los:%.8f mLo:%08f %s T:%fs"%
(clr,i,batchSize,train_data.epochs_completed, tloss, mini_loss, V_string, (tt-t1)))
if laflag[0]:
file_record = logfileV2(file_record, runs=runs, V_string=V_string,
final_train_loss=tloss, train_min_loss=mini_loss, TC=(tt-t1), ILR=cLR, FLR=clr, LS=lrstep, ites=i,
Epo=train_data.epochs_completed, cBS=batchSize, iBS=batchSize,
input=sys.argv, CMstring=cm_string, T=time.localtime(tt), df=dfile)
if loss_a[0].minimun_loss < stcmwvlilttv_for_mutilTest:
second_save(model_save_path, model_save_path_second)
saver.save(sess=sess, save_path=model_save_path)
'''MODULE3 ENDS---------------------------------------------------------------------------------------------'''
#
#
#
elif Module==6:
stcmwvlilttv_for_mutilTest=1.4054#value needs to be determined; save_the_current_model_when_validation_loss_is_less_than_this_value
'''MODULE6----------------------------------------------------------------------------------------------------
Options for the fusion net of vgg inner_face and geometry input
-------------------------------------------------------------------------------------------------------------'''
print('Network Type: %s'%(NetworkType))
if NetworkType==440:
from Geometric_NET import Geometric_NET_2h1I as GEON
geonfcdim=1024
from VGG_NET import VGG_NET_o as APPN
appnfcdim=4096
from FintuneNet import FTN0 as FTN
elif NetworkType==441:
from Geometric_NET import Geometric_NET_2h1I as GEON
geonfcdim=1024
from VGG_NET import VGG_NET_o as APPN
appnfcdim=4096
from FintuneNet import FTN1 as FTN
else:
print("Usage: python finetune.py <GPUID> <Module> <NetworkType>\nWrong NetworkType, please check the NetworkType input again.")
exit(-1)
'''Here begins the implementation logic-------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------'''
#define geometry graph
geo_G=tf.Graph()
with geo_G.as_default():
geo_features=tf.placeholder(tf.float32, [None,122])
geo_net=GEON({'data':geo_features})
geofc=geo_net.layers['gefc2']
#print(geo_G.get_all_collection_keys())
#print(geo_G.get_collection(name='trainable_variables'))
#print(geo_G.get_collection(name='variables'))
gsaver = tf.train.Saver()
#exit()
#define appearance graph
app_G=tf.Graph()
with app_G.as_default():
images = tf.placeholder(tf.float32, m1shape)
app_net=APPN({'data':images})
appfc=app_net.layers['fc2']
asaver = tf.train.Saver()
#define fine-tuning graph
fint_G=tf.Graph()
with fint_G.as_default():
geo_fc=tf.placeholder(tf.float32, [None, geonfcdim])
app_fc=tf.placeholder(tf.float32, [None, appnfcdim])
labels = tf.placeholder(tf.float32, labelshape)#the number of labels is determined by labelshape
fin_net=FTN({'appfc':app_fc, 'geofc':geo_fc})
softmax=fin_net.layers['prob']
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=softmax),0)
optm=tf.train.RMSPropOptimizer(lr)
train_op=optm.minimize(loss)#for train
#for test
correct_prediction = tf.equal(tf.argmax(softmax,1),tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
#print(fint_G.get_all_collection_keys())
#print(fint_G.get_collection(name='variables'))
#print(fint_G.get_collection(name='train_op'))
#print(fint_G.get_collection(name='trainable_variables'))
#exit()
print('Geometry graph at: \t\t', geo_G)
print('Appearance graph at: \t\t', app_G)
print('Fine-tuning graph at: \t\t', fint_G)
#exit()
#different sessions have different graph
geo_sess=tf.InteractiveSession(graph=geo_G)
app_sess=tf.InteractiveSession(graph=app_G)
fin_sess=tf.InteractiveSession(graph=fint_G)
print('\n%%%%%%%Sessions are created\n')
try:
#must initialize the variables in the graph before computation or loading pretrained weights
geo_sess.run(tf.variables_initializer(var_list=geo_G.get_collection(name='variables')))
print('\nGeometry network variables initialized.')
#gsaver must be defined inside the graph owned by its session, or restore/save will raise an error
gsaver.restore(sess=geo_sess, save_path=selectGeoModelPathForModule6_8G(TestID=TestID))
print('Geometry Model loaded')
except:
print('Unable to load the pretrained network for geo_net')
traceback.print_exc()
try:
#must initialize the variables in the graph before computation or loading pretrained weights
app_sess.run(tf.variables_initializer(var_list=app_G.get_collection(name='variables')))
print('\nAppearance network variables initialized.')
#asaver must be defined inside the graph owned by its session, or restore/save will raise an error
asaver.restore(sess=app_sess, save_path=selectAppModelPathForModule6_8G(TestID=TestID))
print('Appearance Model loaded\n')
except:
print('Unable to load the pretrained network for app_net')
traceback.print_exc()
exit(2)
#exit()
try:
#besides the variables, the optimizer state also needs to be initialized.
#fin_sess.run(tf.variables_initializer(var_list=fint_G.get_collection(name='trainable_variables')))
fin_sess.run(tf.variables_initializer(var_list=fint_G.get_collection(name='variables')))
saver = tf.train.Saver()
print('\nFine-tuning network variables initialized.')
except:
print('Unable to initialize Fine-tuning network variables')
traceback.print_exc()
exit(3)
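#--- illustrative sketch (not part of the original script) ------------------
#a minimal sketch, assuming numpy batches of images, geometry features and
#labels are at hand, of how one fusion training step flows across the three
#sessions: the two frozen extractors run first and their outputs feed fin_sess.
#(note: if lr above is a placeholder rather than a constant, it must be fed too)
def _demo_fusion_step(batch_images, batch_geo, batch_labels):
    g = geo_sess.run(geofc, feed_dict={geo_features: batch_geo})
    a = app_sess.run(appfc, feed_dict={images: batch_images})
    _, step_loss = fin_sess.run([train_op, loss],
        feed_dict={geo_fc: g, app_fc: a, labels: batch_labels})
    return step_loss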
'''MODULE6 ENDS---------------------------------------------------------------------------------------------'''
newmodelname=model_save_path.split('.ckpt')[0]+'_'+str(loss_a[0].minimun_loss)+'_.ckpt'
if os.path.exists(model_save_path+'.data-00000-of-00001'):
os.rename((model_save_path+'.data-00000-of-00001'),(newmodelname+'.data-00000-of-00001'))
os.rename((model_save_path+'.index'),(newmodelname+'.index'))
os.rename((model_save_path+'.meta'),(newmodelname+'.meta'))
newmodelname_second=model_save_path_second.split('.ckpt')[0]+'_'+str(loss_a[0].second_minimun_loss)+'_.ckpt'
if os.path.exists(model_save_path_second+'.data-00000-of-00001'):
os.rename((model_save_path_second+'.data-00000-of-00001'),(newmodelname_second+'.data-00000-of-00001'))
os.rename((model_save_path_second+'.index'),(newmodelname_second+'.index'))
os.rename((model_save_path_second+'.meta'),(newmodelname_second+'.meta'))
tt=time.time()
log=log.replace('.txt',('_'+type(optm).__name__+'.txt'))
filelog=open(log,'a')
filelog.write('%s\t\t TotalTimeConsumed: %f\tOptimizer: %s\n'%(file_record, (tt-t1), str(type(optm).__name__)))
filelog.close()
if not Module==7:
print(log)
print(log.split('.txt')[0])
for log_index in range(pkl_test_num):
losslog=log.split('.txt')[0]+'_Runs%d'%(runs)+'_T%d'%(log_index+1)+'.validationlosslist'
losslog=losslog.replace('./logs/','./logs/VL/')
loss_a[log_index].outputlosslist(losslog)
except:
try:
tt=time.time()
log=log.replace('.txt',('_'+str(type(optm).__name__)+'.txt'))
filelog=open(log,'a')
filelog.write('%s\t\t TotalTimeConsumed: %f\tOptimizer: %s\n'%(file_record, (tt-t1), str(type(optm).__name__)))
filelog.close()
print('\n\n>>>>>> Saving current run info after the run crashed or was interrupted.\n\n')
if not Module==7:
print(log)
print(log.split('.txt')[0])
for log_index in range(pkl_test_num):
losslog=log.split('.txt')[0]+'_Runs%d'%(runs)+'_T%d'%(log_index+1)+'.validationlosslist'
losslog=losslog.replace('./logs/','./logs/VL/')
loss_a[log_index].outputlosslist(losslog)
print('>>>>>> Current run info has been saved after the run crashed or was interrupted.\n\n')
except:
print('ERROR: Failed to save current run info after the crash')
ferror=open(errorlog,'w')
traceback.print_exc()
traceback.print_exc(file=ferror)
ferror.close()
| 56.070133 | 631 | 0.525635 | 17,431 | 160,697 | 4.654925 | 0.059148 | 0.003451 | 0.013286 | 0.017747 | 0.86898 | 0.857764 | 0.847252 | 0.835691 | 0.828814 | 0.811745 | 0 | 0.044242 | 0.328376 | 160,697 | 2,866 | 632 | 56.070133 | 0.707555 | 0.100083 | 0 | 0.787786 | 0 | 0.011863 | 0.143447 | 0.042193 | 0 | 0 | 0 | 0 | 0.000879 | 1 | 0.012742 | false | 0 | 0.041301 | 0.001318 | 0.061511 | 0.126538 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
8d197cc0b71d27480f1fa78e0050a6d239cd57aa | 2,556 | py | Python | tests/test_fluentbit_transport.py | laulin/fluentbit-server-py | 6b59c2ef7eb2f3242ad328f567412d87c1fe8f01 | ["MIT"] | 1 | 2022-03-04T13:59:48.000Z | 2022-03-04T13:59:48.000Z | tests/test_fluentbit_transport.py | laulin/fluentbit-server-py | 6b59c2ef7eb2f3242ad328f567412d87c1fe8f01 | ["MIT"] | null | null | null | tests/test_fluentbit_transport.py | laulin/fluentbit-server-py | 6b59c2ef7eb2f3242ad328f567412d87c1fe8f01 | ["MIT"] | null | null | null |
import unittest
import msgpack
from fluentbit_server.fluentbit_transport import FluentbitTransport, Event
class TestFluentbitTransport(unittest.TestCase):
def test_forward_mode(self):
message_bin = b'\x92\xa8random.0\x95\x92\xd7\x00_\xce\x07\xa5\x0cE\x8an\x81\xaarand_value\xcf\xaf!\x12\xa5\xfas\rb\x92\xd7\x00_\xce\x07\xa6\x0c\x17\xabQ\x81\xaarand_value\xcf\xc6\xach\x027V\xbcW\x92\xd7\x00_\xce\x07\xa7\x0c\x08\x92\xe0\x81\xaarand_value\xcf]{\x8c\xf1\xa6VY<\x92\xd7\x00_\xce\x07\xa8\x0c9}b\x81\xaarand_value\xcf\xf9?V\x1c50*\xd8\x92\xd7\x00_\xce\x07\xa9\x0b\xfelk\x81\xaarand_value\xcf\x0f\xff\x84\x9e\xb8\xb8\xbb9'
message = msgpack.unpackb(message_bin, raw=True)
ft = FluentbitTransport(None)
result = ft.forward_mode(message)
expected = [Event(b'random.0', 1607338098.884014, {b'rand_value': 12619388134949588322}),
Event(b'random.0', 1607338096.877777, {b'rand_value': 14315931674231618647}),
Event(b'random.0', 1607338096.88848, {b'rand_value': 6736132637168392508}),
Event(b'random.0', 1607338101.094242, {b'rand_value': 17960168518128249560}),
Event(b'random.0', 1607338098.223275, {b'rand_value': 1152785846868949817})]
self.assertEqual(result, expected)
def test_process(self):
message_bin = b'\x92\xa8random.0\x95\x92\xd7\x00_\xce\x07\xa5\x0cE\x8an\x81\xaarand_value\xcf\xaf!\x12\xa5\xfas\rb\x92\xd7\x00_\xce\x07\xa6\x0c\x17\xabQ\x81\xaarand_value\xcf\xc6\xach\x027V\xbcW\x92\xd7\x00_\xce\x07\xa7\x0c\x08\x92\xe0\x81\xaarand_value\xcf]{\x8c\xf1\xa6VY<\x92\xd7\x00_\xce\x07\xa8\x0c9}b\x81\xaarand_value\xcf\xf9?V\x1c50*\xd8\x92\xd7\x00_\xce\x07\xa9\x0b\xfelk\x81\xaarand_value\xcf\x0f\xff\x84\x9e\xb8\xb8\xbb9'
def callback(event):
callback.result.append(event)
message = msgpack.unpackb(message_bin, raw=True)
callback.result = list()
ft = FluentbitTransport(callback)
ft.process(message)
expected = [Event(b'random.0', 1607338098.884014, {b'rand_value': 12619388134949588322}),
Event(b'random.0', 1607338096.877777, {b'rand_value': 14315931674231618647}),
Event(b'random.0', 1607338096.88848, {b'rand_value': 6736132637168392508}),
Event(b'random.0', 1607338101.094242, {b'rand_value': 17960168518128249560}),
Event(b'random.0', 1607338098.223275, {b'rand_value': 1152785846868949817})]
self.assertEqual(callback.result, expected)
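#--- illustrative usage sketch (not part of the original tests) -------------
#a minimal sketch, assuming a raw "forward mode" msgpack payload has already
#been read from a socket; it only uses the APIs exercised by the tests above,
#FluentbitTransport(callback) and process().
def demo_process(raw_payload):
    received = []
    ft = FluentbitTransport(received.append)  # any 1-arg callable works as the callback
    ft.process(msgpack.unpackb(raw_payload, raw=True))
    return received  # list of Event(tag, timestamp, record) entries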
| 59.44186 | 440 | 0.68975 | 360 | 2,556 | 4.786111 | 0.263889 | 0.034823 | 0.052234 | 0.069646 | 0.800929 | 0.800929 | 0.800929 | 0.75682 | 0.75682 | 0.75682 | 0 | 0.254884 | 0.158842 | 2,556 | 42 | 441 | 60.857143 | 0.546512 | 0 | 0 | 0.482759 | 0 | 0.068966 | 0.395303 | 0.324853 | 0 | 0 | 0 | 0 | 0.068966 | 1 | 0.103448 | false | 0 | 0.103448 | 0 | 0.241379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
8d570cb7460aa23ea63f1f8ad51d7b398461d496 | 311 | py | Python | libs/__init__.py | ooby/iris-pacs | 8311a5f42aeea7b02545b733d91e4ecf05395a38 | ["MIT"] | 1 | 2021-11-18T00:58:47.000Z | 2021-11-18T00:58:47.000Z | libs/__init__.py | sciberia-llc/pacs | 1d955035cdfd682a75d756b14feb41e0eb8ee279 | ["MIT"] | null | null | null | libs/__init__.py | sciberia-llc/pacs | 1d955035cdfd682a75d756b14feb41e0eb8ee279 | ["MIT"] | null | null | null |
from .db_classes import DataElement, Patient, Series, SOPInstance, Study
from .db import Database
from .commands import (handle_c_store, handle_open, handle_close,
handle_c_find, handle_c_echo, handle_c_get)
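# a hypothetical consumer then imports the re-exported names directly, e.g.:
# from libs import Database, handle_c_echo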
| 34.555556 | 72 | 0.845659 | 47 | 311 | 5.361702 | 0.404255 | 0.285714 | 0.428571 | 0.571429 | 0.396825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.115756 | 311 | 8 | 73 | 38.875 | 0.916364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
8d794317dc73eca615f3af61d1c6c231b74e439b | 78 | py | Python | scrapers/party_funding/__init__.py | spudmind/spud | 86e44bca4efd3cd6358467e1511048698a45edbc | ["MIT"] | 2 | 2015-04-11T12:22:41.000Z | 2016-08-18T11:12:06.000Z | scrapers/party_funding/__init__.py | spudmind/spud | 86e44bca4efd3cd6358467e1511048698a45edbc | ["MIT"] | 84 | 2015-01-22T14:33:49.000Z | 2015-04-01T23:15:29.000Z | scrapers/party_funding/__init__.py | spudmind/spud | 86e44bca4efd3cd6358467e1511048698a45edbc | ["MIT"] | 1 | 2015-04-16T03:10:39.000Z | 2015-04-16T03:10:39.000Z |
from fetch_party_funding import fetch
from scrape_party_funding import scrape
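# note: these are Python 2 style implicit relative imports; under Python 3 they
# would need to be written as "from .fetch_party_funding import fetch", etc.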
| 26 | 39 | 0.897436 | 12 | 78 | 5.5 | 0.5 | 0.363636 | 0.545455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102564 | 78 | 2 | 40 | 39 | 0.942857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
8da36d594d0f4d9f06e148f74c085f94c013ae36 | 49,719 | py | Python | Models/Report.py | fefelson/FelsonSports | bc0c16d63b19ffe4d468dcda5ab224013abe23fa | ["MIT"] | null | null | null | Models/Report.py | fefelson/FelsonSports | bc0c16d63b19ffe4d468dcda5ab224013abe23fa | ["MIT"] | null | null | null | Models/Report.py | fefelson/FelsonSports | bc0c16d63b19ffe4d468dcda5ab224013abe23fa | ["MIT"] | null | null | null |
from copy import deepcopy
from datetime import datetime, date, timedelta
from .. import Environ as ENV
from ..Interfaces import Fileable
from ..Models import yId
from ..Utils.SQL import formGDCmd, getGDCmds
from pprint import pprint
################################################################################
################################################################################
passingCmd = """
SELECT att.value, comp.value/att.value,
yds.value, yds.value/comp.value, tds.value, ints.value, rating.value
FROM lineups
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 102
GROUP BY player_id
) AS comp
ON lineups.team_id = comp.team_id AND lineups.player_id = comp.player_id
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 103
GROUP BY player_id
) AS att
ON lineups.team_id = att.team_id AND lineups.player_id = att.player_id
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 105
GROUP BY player_id
) AS yds
ON lineups.team_id = yds.team_id AND lineups.player_id = yds.player_id
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 108
GROUP BY player_id
) AS tds
ON lineups.team_id = tds.team_id AND lineups.player_id = tds.player_id
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 109
GROUP BY player_id
) AS ints
ON lineups.team_id = ints.team_id AND lineups.player_id = ints.player_id
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 113
GROUP BY player_id
) AS rating
ON lineups.team_id = rating.team_id AND lineups.player_id = rating.player_id
GROUP BY lineups.player_id
"""
ncaafRushingCmd = """
SELECT att.value, yds.value,
yds.value/att.value, tds.value
FROM lineups
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 202
GROUP BY player_id
) AS att
ON lineups.team_id = att.team_id AND lineups.player_id = att.player_id
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 203
GROUP BY player_id
) AS yds
ON lineups.team_id = yds.team_id AND lineups.player_id = yds.player_id
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 207
GROUP BY player_id
) AS tds
ON lineups.team_id = tds.team_id AND lineups.player_id = tds.player_id
GROUP BY lineups.player_id
"""
nflRushingCmd = """
SELECT att.value, yds.value,
yds.value/att.value, tds.value, fum.value
FROM lineups
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 202
GROUP BY player_id
) AS att
ON lineups.team_id = att.team_id AND lineups.player_id = att.player_id
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 203
GROUP BY player_id
) AS yds
ON lineups.team_id = yds.team_id AND lineups.player_id = yds.player_id
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 207
GROUP BY player_id
) AS tds
ON lineups.team_id = tds.team_id AND lineups.player_id = tds.player_id
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 3
GROUP BY player_id
) AS fum
ON lineups.team_id = fum.team_id AND lineups.player_id = fum.player_id
GROUP BY lineups.player_id
"""
ncaafReceivingCmd = """
SELECT rec.value, yds.value,
yds.value/rec.value, tds.value
FROM lineups
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 302
GROUP BY player_id
) AS rec
ON lineups.team_id = rec.team_id AND lineups.player_id = rec.player_id
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 303
GROUP BY player_id
) AS yds
ON lineups.team_id = yds.team_id AND lineups.player_id = yds.player_id
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 309
GROUP BY player_id
) AS tds
ON lineups.team_id = tds.team_id AND lineups.player_id = tds.player_id
GROUP BY lineups.player_id
"""
nflReceivingCmd = """
SELECT tgt.value, rec.value, yds.value,
yds.value/rec.value, tds.value, fum.value
FROM lineups
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 310
GROUP BY player_id
) AS tgt
ON lineups.team_id = tgt.team_id AND lineups.player_id = tgt.player_id
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 302
GROUP BY player_id
) AS rec
ON lineups.team_id = rec.team_id AND lineups.player_id = rec.player_id
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 303
GROUP BY player_id
) AS yds
ON lineups.team_id = yds.team_id AND lineups.player_id = yds.player_id
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 309
GROUP BY player_id
) AS tds
ON lineups.team_id = tds.team_id AND lineups.player_id = tds.player_id
INNER JOIN (
SELECT team_id, player_id, AVG(value) AS value
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
WHERE stat_id = 3
GROUP BY player_id
) AS fum
ON lineups.team_id = fum.team_id AND lineups.player_id = fum.player_id
GROUP BY lineups.player_id
"""
ncaaBBallTeamStatCmd = """
SELECT AVG(fga), SUM(fgm)/(SUM(fga)*1.0), AVG(fta),
SUM(ftm)/(SUM(fta)*1.0), AVG(tpa), SUM(tpm)/(SUM(tpa)*1.0), AVG(pts),
AVG(oreb), AVG(dreb), AVG(reb), AVG(ast), AVG(stl), AVG(blk), AVG(trn),
AVG(fls)
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN team_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
GROUP BY ts.team_id
"""
bballPlayerStatCmd = """
SELECT AVG(starter), AVG(fga), SUM(fgm)/(SUM(fga)*1.0), AVG(fta),
SUM(ftm)/(SUM(fta)*1.0), AVG(tpa), SUM(tpm)/(SUM(tpa)*1.0), AVG(pts),
AVG(oreb), AVG(reb), AVG(ast), AVG(stl), AVG(blk), AVG(trn),
AVG(fls), AVG(mins), AVG(plmn)
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
INNER JOIN lineups
ON ts.game_id = lineups.game_id AND ts.player_id = lineups.player_id
GROUP BY ts.player_id
HAVING AVG(mins) >= 10
"""
ncaabPlayerStatCmd = """
SELECT AVG(starter), AVG(fga), SUM(fgm)/(SUM(fga)*1.0), AVG(fta),
SUM(ftm)/(SUM(fta)*1.0), AVG(tpa), SUM(tpm)/(SUM(tpa)*1.0), AVG(pts),
AVG(oreb), AVG(reb), AVG(ast), AVG(stl), AVG(blk), AVG(trn),
AVG(fls), AVG(mins)
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN player_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
INNER JOIN lineups
ON ts.game_id = lineups.game_id AND ts.player_id = lineups.player_id
GROUP BY ts.player_id
"""
bballTeamStatCmd = """
SELECT AVG(fga), SUM(fgm)/(SUM(fga)*1.0), AVG(fta),
SUM(ftm)/(SUM(fta)*1.0), AVG(tpa), SUM(tpm)/(SUM(tpa)*1.0), AVG(pts),
AVG(oreb), AVG(dreb), AVG(reb), AVG(ast), AVG(stl), AVG(blk), AVG(trn),
AVG(fls), AVG(pts_in_pt), AVG(fb_pts)
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN team_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
GROUP BY ts.team_id
"""
batPlayerCmd = """
SELECT SUM(ab), SUM(r), SUM(bb), SUM(h), SUM(hr), SUM(rbi), SUM(sb),
SUM(tb), SUM(so), (SUM(h)*1.0)/SUM(ab) AS ba,
((SUM(hbp)+SUM(bb)+SUM(h)*1.0)/SUM(pa)) AS obp,
((SUM(tb)*1.0)/SUM(ab)) AS slg,
((SUM(hbp)+SUM(bb)+SUM(h)*1.0)/SUM(pa))+((SUM(tb)*1.0)/SUM(ab)) AS ops
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN batter_stats AS bs
ON gd.game_id = bs.game_id {0[andBS]}
GROUP BY bs.player_id
HAVING SUM(ab) > ?
"""
mlbTeamCmd = """
SELECT SUM((CASE WHEN ts.team_id = winner_id THEN 1 ELSE 0 END)) AS wins,
SUM((CASE WHEN ts.team_id = loser_id THEN 1 ELSE 0 END)) AS loses,
SUM(ab), SUM(r), SUM(bb), SUM(h), SUM(hr), SUM(rbi), SUM(sb), SUM(so),
SUM(lob), (SUM(h)*1.0)/SUM(ab) AS ba, ((SUM(bb)+SUM(h)*1.0)/(SUM(ab)+SUM(bb))) AS obp,
SUM(ip), SUM(ra), (SUM(er)*9.0)/SUM(ip), (SUM(bba)+SUM(ha))/SUM(ip),
(SUM(k)*9.0)/SUM(ip), SUM(hra)
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN team_stats AS ts
ON gd.game_id = ts.game_id {0[andTS]}
GROUP BY ts.team_id
"""
mlbTeamGameCmd = """
SELECT SUM((CASE spread_outcome WHEN 1 THEN 1 ELSE 0 END)) ats_wins,
SUM((CASE spread_outcome WHEN -1 THEN 1 ELSE 0 END)) AS ats_loses,
AVG(spread), AVG(result), AVG(line), AVG(money),
SUM((CASE WHEN spread_outcome == 1 AND line > 0 THEN 100+line
WHEN spread_outcome == 1 AND line < 0 THEN (10000/(line*-1.0))+100
ELSE 0 END)),
SUM((CASE WHEN money_outcome == 1 AND money > 0 THEN 100+money
WHEN money_outcome == 1 AND money < 0 THEN (10000/(money*-1.0))+100
ELSE 0 END))
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN game_lines AS gl
ON gd.game_id = gl.game_id {0[andGL]}
GROUP BY gl.team_id
"""
ncaafTeamGameCmd = """
SELECT SUM((CASE spread_outcome WHEN 1 THEN 1 ELSE 0 END)) ats_wins,
SUM((CASE spread_outcome WHEN -1 THEN 1 ELSE 0 END)) AS ats_loses,
SUM((CASE spread_outcome WHEN 0 THEN 1 ELSE 0 END)) AS ats_push,
AVG(spread), AVG(result), AVG(line), AVG(money),
SUM((CASE WHEN spread_outcome == 1 AND line > 0 THEN 100+line
WHEN spread_outcome == 1 AND line < 0 THEN (10000/(line*-1.0))+100
WHEN spread_outcome == 0 THEN 100
ELSE 0 END)),
SUM((CASE WHEN money_outcome == 1 AND money > 0 THEN 100+money
WHEN money_outcome == 1 AND money < 0 THEN (10000/(money*-1.0))+100
ELSE 0 END)),
AVG(ou), AVG(over_line), AVG(under_line), AVG(ov.total),
SUM((CASE WHEN outcome == 1 AND line > 0 THEN 100+over_line
WHEN outcome == 1 AND line < 0 THEN (10000/(over_line*-1.0))+100
WHEN outcome == 0 THEN 100
ELSE 0 END)),
SUM((CASE WHEN outcome == -1 AND line > 0 THEN 100+under_line
WHEN outcome == -1 AND line < 0 THEN (10000/(under_line*-1.0))+100
WHEN outcome == 0 THEN 100
ELSE 0 END))
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN game_lines AS gl
ON gd.game_id = gl.game_id {0[andGL]}
INNER JOIN over_unders AS ov
ON gl.game_id = ov.game_id
GROUP BY gl.team_id
"""
ncaafTeamStatCmd = """
SELECT AVG(value)
FROM team_stats AS ts
INNER JOIN ( {0[gdCmd]} ) AS gd
ON ts.game_id = gd.game_id {0[andTS]}
INNER JOIN stat_types AS st
ON ts.stat_id = st.stat_id
WHERE st.abrv = ?
GROUP BY team_id
"""
ncaafTeamPlayerStatCmd = """
SELECT AVG(value)
FROM (SELECT SUM(value) AS value, team_id
FROM player_stats AS ts
INNER JOIN ( {0[gdCmd]} ) AS gd
ON ts.game_id = gd.game_id {0[andTS]}
INNER JOIN stat_types AS st
ON ts.stat_id = st.stat_id
WHERE st.abrv = ?
GROUP BY ts.game_id, team_id)
GROUP by team_id
"""
pitchPlayerCmd = """
SELECT SUM(w), SUM(l), SUM(sv), SUM(ip), SUM(bba), SUM(ha), SUM(k), SUM(hra),
(SUM(er)*9.0)/SUM(ip), (SUM(bba)+SUM(ha))/SUM(ip), (SUM(k)*9)/SUM(ip)
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN pitcher_stats AS ps
ON gd.game_id = ps.game_id {0[andPS]}
INNER JOIN bullpens AS bp
ON gd.game_id = bp.game_id AND ps.player_id = bp.player_id
WHERE pitch_order > 1
GROUP BY ps.player_id
HAVING SUM(ip) > ?
"""
startPlayerCmd = """
SELECT SUM(w), SUM(l), SUM(sv), SUM(ip), SUM(bba), SUM(ha), SUM(k), SUM(hra),
(SUM(er)*9.0)/SUM(ip), (SUM(bba)+SUM(ha))/SUM(ip), (SUM(k)*9)/SUM(ip)
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN pitcher_stats AS ps
ON gd.game_id = ps.game_id {0[andPS]}
INNER JOIN bullpens AS bp
ON gd.game_id = bp.game_id AND ps.player_id = bp.player_id
WHERE pitch_order = 1
GROUP BY ps.player_id
HAVING SUM(ip) > ?
"""
startGameCmd = """
SELECT SUM((CASE spread_outcome WHEN 1 THEN 1 ELSE 0 END)) ats_wins,
SUM((CASE spread_outcome WHEN -1 THEN 1 ELSE 0 END)) AS ats_loses,
AVG(spread), AVG(result), AVG(line), AVG(money),
SUM((CASE WHEN spread_outcome == 1 AND line > 0 THEN 100+line
WHEN spread_outcome == 1 AND line < 0 THEN 100+(10000/(line*-1.0))
ELSE 0 END)),
SUM((CASE WHEN money_outcome == 1 AND money > 0 THEN 100+money
WHEN money_outcome == 1 AND money < 0 THEN 100+(10000/(money*-1.0))
ELSE 0 END))
FROM ( {0[gdCmd]} ) AS gd
INNER JOIN game_lines AS gl
ON gd.game_id = gl.game_id {0[andGL]}
INNER JOIN bullpens AS bp
ON gd.game_id = bp.game_id
WHERE pitch_order = 1
GROUP BY player_id
"""
timeFrame = ("Season", "2Months", "1Month", "2Weeks")
today = date.today()
twoWeeks = today - timedelta(14)
oneMonth = today - timedelta(30)
twoMonths = today - timedelta(60)
################################################################################
################################################################################
_awayHomeDict = dict([(label, {}) for label in ("all", "away", "home")])
_winLossDict = dict([(label, {}) for label in ("all", "winner", "loser")])
_awayWinDict = dict([(label, deepcopy(_winLossDict)) for label in ("all", "away", "home")])
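# e.g. _awayWinDict == {"all": {"all": {}, "winner": {}, "loser": {}},
#                       "away": {...}, "home": {...}}; deepcopy keeps the nested
# dicts independent so updates to one split never leak into another.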
class Report(Fileable):
_info = {"playerStats": {},
"bullpenStats":{},
"starterStats":{},
"teamStats": {},
"teamGaming":{},
"startGaming":{},
"batterStats":{},
"leagueId": None,
"lastUpdate": str(datetime.today()),
}
_reportFilePath = None
def __init__(self, league, *args, **kwargs):
Fileable.__init__(self, self._info, *args, **kwargs)
self.league = league
def create(self):
self.setFilePath()
print("new Report", self.filePath)
self.info = deepcopy(self._info)
self.reportData()
self.write()
def score(self, stat, data):
values = sorted([x[stat] for x in data if x[stat]])
sDict = {}
sDict[1] = values[int(.9*len(values))]
sDict[2] = values[int(.8*len(values))]
sDict[3] = values[int(.6*len(values))]
sDict[4] = values[int(.4*len(values))]
sDict[5] = values[int(.2*len(values))]
return sDict.copy()
def playerScore(self, stat, data):
values = sorted([x[stat] for x in data if x[stat]])
sDict = {}
sDict[1] = values[int(.95*len(values))]
sDict[2] = values[int(.9*len(values))]
sDict[3] = values[int(.8*len(values))]
sDict[4] = values[int(.7*len(values))]
sDict[5] = values[int(.6*len(values))]
return sDict.copy()
#currently identical to score(); kept as a separate hook for team-specific grading
def teamScore(self, stat, data):
values = sorted([x[stat] for x in data if x[stat]])
sDict = {}
sDict[1] = values[int(.9*len(values))]
sDict[2] = values[int(.8*len(values))]
sDict[3] = values[int(.6*len(values))]
sDict[4] = values[int(.4*len(values))]
sDict[5] = values[int(.2*len(values))]
return sDict.copy()
def score1(self, data):
values = sorted(data)
sDict = {}
try:
sDict[1] = values[int(.95*len(values))]
sDict[2] = values[int(.9*len(values))]
sDict[3] = values[int(.8*len(values))]
sDict[4] = values[int(.6*len(values))]
sDict[5] = values[int(.5*len(values))]
except IndexError:
pass
return sDict.copy()
def setFilePath(self):
self.filePath = ENV.reportFilePath.format(self._info)
def reportData(self):
pass
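#--- illustrative sketch (not part of the original module) ------------------
#score(), teamScore() and playerScore() all grade a stat 1-5 by percentile
#cutoffs over the sorted values; this hypothetical standalone replica shows
#the bucketing that score() applies (90th/80th/60th/40th/20th percentiles).
def _demo_score_buckets():
    values = sorted(float(v) for v in range(1, 101))
    return {grade: values[int(q * len(values))]
            for grade, q in ((1, .9), (2, .8), (3, .6), (4, .4), (5, .2))}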
################################################################################
################################################################################
class MLBReport(Report):
_batStats = ("ab", "r", "bb", "h", "hr", "rbi", "sb", "tb", "so", "avg", "obp", "slg", "ops")
_pitchStats = ("w","l", "sv", "ip", "bba", "ha", "k", "hra", "era", "whip", "k9")
_startGaming = ("atsW", "atsL", "spread", "result", "spreadLine", "moneyLine", "ats$", "money$")
_teamStats = ("w", "l", "ab","r","bb", "h", "hr","rbi","sb","so","lob","avg", "obp", "ip","ra","era","whip","k9","hra")
_teamGaming = ("atsW", "atsL", "atsP", "spread", "result", "spreadLine", "moneyLine", "ats$", "money$", "ou", "overLine", "underLine", "total", "over$", "under$")
_info = {"batterStats": dict(zip(timeFrame, [deepcopy(_awayHomeDict) for _ in timeFrame])),
"teamStats": dict(zip(timeFrame, [deepcopy(_awayHomeDict) for _ in timeFrame])),
"teamGaming": dict(zip(timeFrame, [deepcopy(_awayHomeDict) for _ in timeFrame])),
"bullpenStats": dict(zip(timeFrame, [deepcopy(_awayHomeDict) for _ in timeFrame])),
"starterStats": dict(zip(timeFrame, [deepcopy(_awayHomeDict) for _ in timeFrame])),
"startGaming": dict(zip(timeFrame, [deepcopy(_awayHomeDict) for _ in timeFrame])),
"leagueId": "mlb",
"lastUpdate": str(datetime.today()),
}
def __init__(self, league, *args, **kwargs):
super().__init__(league, *args, **kwargs)
def reportData(self):
abLimits = dict(zip(timeFrame, (150,80,40,20)))
bullLimits = dict(zip(timeFrame, (52,24,14,6)))
startLimits = dict(zip(timeFrame, (120,30,15,10)))
currentSeason = self.league.fileManager.info["currentSeason"]
gdCmds = {"Season": formGDCmd(currentSeason),
"2Weeks": formGDCmd(currentSeason, twoWeeks),
"1Month": formGDCmd(currentSeason, oneMonth),
"2Months": formGDCmd(currentSeason, twoMonths)
}
for tF in timeFrame:
for hA in ("all", "away", "home"):
div = 1 if hA == "all" else 2
gdCmd = gdCmds[tF]
andBS = "" if hA == "all" else "AND gd.{}_id = bs.team_id".format(hA)
andTS = "" if hA == "all" else "AND gd.{}_id = ts.team_id".format(hA)
andPS = "" if hA == "all" else "AND gd.{}_id = ps.team_id".format(hA)
andGL = "" if hA == "all" else "AND gd.{}_id = gl.team_id".format(hA)
batData = [dict(zip(self._batStats, player)) for player in self.league.dbManager.fetchAll(batPlayerCmd.format({"gdCmd":gdCmd, "andBS":andBS}), (abLimits[tF]/div,))]
teamData = [dict(zip(self._teamStats, player)) for player in self.league.dbManager.fetchAll(mlbTeamCmd.format({"gdCmd":gdCmd, "andTS":andTS}))]
startData = [dict(zip(self._pitchStats, player)) for player in self.league.dbManager.fetchAll(startPlayerCmd.format({"gdCmd":gdCmd, "andPS":andPS}), (startLimits[tF]/div,))]
bullData = [dict(zip(self._pitchStats, player)) for player in self.league.dbManager.fetchAll(pitchPlayerCmd.format({"gdCmd":gdCmd, "andPS":andPS}), (bullLimits[tF]/div,))]
gameData = [dict(zip(self._teamGaming, player)) for player in self.league.dbManager.fetchAll(mlbTeamGameCmd.format({"gdCmd":gdCmd, "andGL":andGL}))]
startGameData = [dict(zip(self._startGaming, player)) for player in self.league.dbManager.fetchAll(startGameCmd.format({"gdCmd":gdCmd, "andGL":andGL}))]
for stat in self._batStats:
self.info["batterStats"][tF][hA][stat] = self.score(stat, batData, True)
for stat in self._pitchStats:
if stat in ("bba", "ha", "hra", "era","whip", "l"):
reverse = False
else:
reverse = True
self.info["starterStats"][tF][hA][stat] = self.score(stat, startData)
self.info["bullpenStats"][tF][hA][stat] = self.score(stat, bullData)
for stat in self._teamStats:
if stat in ("lob","so","era","whip","ra","hra"):
reverse = False
else:
reverse = True
self.info["teamStats"][tF][hA][stat] = self.score(stat, teamData)
for stat in self._startGaming:
if stat in ("ats$", "money$"):
data = []
for x in startGameData:
total = (x["atsW"]+x["atsL"])*100
result = x[stat]
data.append({stat:((result-total)/total)*100})
self.info["startGaming"][tF][hA][stat] = self.score(stat, data)
else:
self.info["startGaming"][tF][hA][stat] = self.score(stat, startGameData)
for stat in self._teamGaming:
if stat in ("ats$", "money$"):
data = []
for x in gameData:
total = (x["atsW"]+x["atsL"])*100
result = x[stat]
data.append({stat:((result-total)/total)*100})
self.info["teamGaming"][tF][hA][stat] = self.score(stat, data)
else:
self.info["teamGaming"][tF][hA][stat] = self.score(stat, gameData)
################################################################################
################################################################################
class NCAAFReport(Report):
_teamGaming = ("atsW", "atsL", "atsP", "spread", "result", "spreadLine", "moneyLine", "ats$", "money$", "ou", "overLine", "underLine", "total", "over$", "under$")
_passList = ("att", "comp%", "yds", "avg", "td", "int", "rating")
_rushList = ("car", "yds", "avg", "td")
_recList = ("rec", "yds", "avg", "td")
_info = {
"teamGaming": dict(zip(ENV.tFFootballChoices, [deepcopy(_awayHomeDict) for _ in ENV.tFFootballChoices])),
"teamStats": {"regular": dict(zip(ENV.tFFootballChoices, [deepcopy(_awayHomeDict) for _ in ENV.tFFootballChoices])),
"reverse": dict(zip(ENV.tFFootballChoices, [deepcopy(_awayHomeDict) for _ in ENV.tFFootballChoices]))
},
"playerStats": {"passing": dict(zip(ENV.tFFootballChoices, [deepcopy(_awayHomeDict) for _ in ENV.tFFootballChoices])),
"rushing": dict(zip(ENV.tFFootballChoices, [deepcopy(_awayHomeDict) for _ in ENV.tFFootballChoices])),
"receiving": dict(zip(ENV.tFFootballChoices, [deepcopy(_awayHomeDict) for _ in ENV.tFFootballChoices]))
},
"leagueId": "ncaaf",
"lastUpdate": str(datetime.today()),
}
def __init__(self, league, *args, **kwargs):
super().__init__(league, *args, **kwargs)
def reportData(self):
currentSeason = self.league.fileManager.info["currentSeason"]
gdCmds = getGDCmds(int(currentSeason))
timeFrame = ENV.tFFootballChoices
teamStatList = [x[0] for x in self.league.dbManager.fetchAll("SELECT abrv FROM stat_types WHERE stat_id > 900")]
for tF in timeFrame:
for hA in ("all", "away", "home"):
teamStats = {}
gdCmd = gdCmds[tF]
andTS = "" if hA == "all" else "AND gd.{}_id = ts.team_id".format(hA)
andGL = "" if hA == "all" else "AND gd.{}_id = gl.team_id".format(hA)
gameData = [dict(zip(self._teamGaming, player)) for player in self.league.dbManager.fetchAll(ncaafTeamGameCmd.format({"gdCmd":gdCmd, "andGL":andGL}))]
for stat in self._teamGaming:
try:
if stat in ("ats$", "money$"):
data = []
for x in gameData:
total = (x["atsW"]+x["atsL"]+x["atsP"])*100
result = x[stat]
data.append({stat:((result-total)/total)*100})
self.info["teamGaming"][tF][hA][stat] = self.score(stat, data)
else:
self.info["teamGaming"][tF][hA][stat] = self.score(stat, gameData)
except IndexError:
pass
for label in teamStatList:
teamData = [x[0] for x in self.league.dbManager.fetchAll(ncaafTeamStatCmd.format({"gdCmd": gdCmd, "andTS": andTS}), (label,))]
if label in ("TmPaSACKS", "TO", "PEN", "PENYds", "PaTDs", "RuTDs", "TmFum", "TmINTS"):
self.info["teamStats"]["regular"][tF][hA][label] = self.score1(teamData)
self.info["teamStats"]["reverse"][tF][hA][label] = self.score1(teamData)
else:
self.info["teamStats"]["regular"][tF][hA][label] = self.score1(teamData)
self.info["teamStats"]["reverse"][tF][hA][label] = self.score1(teamData)
for label in ("PaTDs", "RuTDs"):
teamData = [x[0] for x in self.league.dbManager.fetchAll(ncaafTeamPlayerStatCmd.format({"gdCmd": gdCmd, "andTS": andTS}), (label, ))]
self.info["teamStats"]["regular"][tF][hA][label] = self.score1(teamData)
self.info["teamStats"]["reverse"][tF][hA][label] = self.score1(teamData)
for label in ("passing", "rushing", "receiving"):
statCmd, statList = {"passing": (passingCmd, self._passList), "rushing": (ncaafRushingCmd, self._rushList), "receiving": (ncaafReceivingCmd, self._recList)}[label]
playerData = [dict(zip(statList, player)) for player in self.league.dbManager.fetchAll(statCmd.format({"gdCmd": gdCmd, "andTS": andTS}))]
pprint(playerData)
for stat in statList:
if stat in ("ints", "fum",):
self.info["playerStats"][label][tF][hA][stat] = self.score(stat, playerData)
else:
self.info["playerStats"][label][tF][hA][stat] = self.score(stat, playerData)
################################################################################
################################################################################
class NFLReport(Report):
_teamGaming = ("atsW", "atsL", "atsP", "spread", "result", "spreadLine", "moneyLine", "ats$", "money$", "ou", "overLine", "underLine", "total", "over$", "under$")
_passList = ("att", "comp%", "yds", "avg", "td", "int", "rating")
_rushList = ("car", "yds", "avg", "td", "fum")
_recList = ("tgt", "rec", "yds", "avg", "td", "fum")
_info = {
"teamGaming": dict(zip(ENV.tFFootballChoices, [deepcopy(_awayHomeDict) for _ in ENV.tFFootballChoices])),
"teamStats": dict(zip(ENV.tFFootballChoices, [deepcopy(_awayHomeDict) for _ in ENV.tFFootballChoices])),
"playerStats": {"passing": dict(zip(ENV.tFFootballChoices, [deepcopy(_awayHomeDict) for _ in ENV.tFFootballChoices])),
"rushing": dict(zip(ENV.tFFootballChoices, [deepcopy(_awayHomeDict) for _ in ENV.tFFootballChoices])),
"receiving": dict(zip(ENV.tFFootballChoices, [deepcopy(_awayHomeDict) for _ in ENV.tFFootballChoices]))
},
"leagueId": "nfl",
"lastUpdate": str(datetime.today()),
}
def __init__(self, league, *args, **kwargs):
super().__init__(league, *args, **kwargs)
def reportData(self):
currentSeason = self.league.fileManager.info["currentSeason"]
gdCmds = getGDCmds(int(currentSeason))
timeFrame = ENV.tFFootballChoices
teamStatList = [x[0] for x in self.league.dbManager.fetchAll("SELECT abrv FROM stat_types WHERE stat_id > 900")]
for tF in timeFrame:
for hA in ("all", "away", "home"):
teamStats = {}
gdCmd = gdCmds[tF]
andTS = "" if hA == "all" else "AND gd.{}_id = ts.team_id".format(hA)
andGL = "" if hA == "all" else "AND gd.{}_id = gl.team_id".format(hA)
gameData = [dict(zip(self._teamGaming, player)) for player in self.league.dbManager.fetchAll(ncaafTeamGameCmd.format({"gdCmd":gdCmd, "andGL":andGL}))]
for stat in self._teamGaming:
try:
if stat in ("ats$", "money$", "over$", "under$"):
data = []
for x in gameData:
total = (x["atsW"]+x["atsL"]+x["atsP"])*100
result = x[stat]
data.append({stat:((result-total)/total)*100})
self.info["teamGaming"][tF][hA][stat] = self.score(stat, data)
else:
self.info["teamGaming"][tF][hA][stat] = self.score(stat, gameData)
except IndexError:
pass
for label in teamStatList:
teamData = [x[0] for x in self.league.dbManager.fetchAll(ncaafTeamStatCmd.format({"gdCmd": gdCmd, "andTS": andTS}), (label,))]
self.info["teamStats"][tF][hA][label] = self.score1(teamData)
for label in ("PaTDs", "RuTDs"):
teamData = [x[0] for x in self.league.dbManager.fetchAll(ncaafTeamPlayerStatCmd.format({"gdCmd": gdCmd, "andTS": andTS}), (label, ))]
self.info["teamStats"][tF][hA][label] = self.score1(teamData)
for label in ("passing", "rushing", "receiving"):
print(label, tF, hA)
statCmd, statList = {"passing": (passingCmd, self._passList), "rushing": (nflRushingCmd, self._rushList), "receiving": (nflReceivingCmd, self._recList)}[label]
playerData = [dict(zip(statList, player)) for player in self.league.dbManager.fetchAll(statCmd.format({"gdCmd": gdCmd, "andTS": andTS}))]
for stat in statList:
self.info["playerStats"][label][tF][hA][stat] = self.score(stat, playerData)
################################################################################
################################################################################
class NBAReport(Report):
_teamGaming = ("atsW", "atsL", "atsP", "spread", "result", "spreadLine", "moneyLine", "ats$", "money$", "ou", "overLine", "underLine", "total", "over$", "under$")
_info = {
"teamGaming": dict(zip(ENV.tFBasketballChoices, [deepcopy(_awayHomeDict) for _ in ENV.tFBasketballChoices])),
"teamStats": dict(zip(ENV.tFBasketballChoices, [deepcopy(_awayHomeDict) for _ in ENV.tFBasketballChoices])),
"playerStats": dict(zip(ENV.tFBasketballChoices, [deepcopy(_awayHomeDict) for _ in ENV.tFBasketballChoices])),
"leagueId": "nba",
"lastUpdate": str(datetime.today()),
}
def __init__(self, league, *args, **kwargs):
super().__init__(league, *args, **kwargs)
def reportData(self):
currentSeason = self.league.fileManager.info["currentSeason"]
gdCmds = getGDCmds(int(currentSeason))
timeFrame = ENV.tFBasketballChoices
teamStatList = ("fga", "fg%", "fta", "ft%", "tpa", "tp%", "pts", "oreb", "dreb", "reb",
"ast", "stl", "blk", "trn", "fls", "pts_in_pt", "fb_pts")
playerStatList = ("start%","fga", "fg%", "fta", "ft%", "tpa", "tp%", "pts", "oreb", "reb",
"ast", "stl", "blk", "trn", "fls", "mins", "plmn")
for tF in timeFrame:
gdCmd = gdCmds[tF]
for hA in ("all", "away", "home"):
andTS = "" if hA == "all" else "AND gd.{}_id = ts.team_id".format(hA)
andGL = "" if hA == "all" else "AND gd.{}_id = gl.team_id".format(hA)
gameData = [dict(zip(self._teamGaming, player)) for player in self.league.dbManager.fetchAll(ncaafTeamGameCmd.format({"gdCmd":gdCmd, "andGL":andGL}))]
for stat in self._teamGaming:
try:
if stat in ("ats$", "money$"):
data = []
for x in gameData:
total = (x["atsW"]+x["atsL"]+x["atsP"])*100
result = x[stat]
data.append({stat:((result-total)/total)*100})
self.info["teamGaming"][tF][hA][stat] = self.score(stat, data)
elif stat in ("over$", "under$"):
data = []
for x in gameData:
total = (x["atsW"]+x["atsL"]+x["atsP"])*100
result = x[stat]
data.append({stat:((result-total)/total)*100})
self.info["teamGaming"][tF][hA][stat] = self.score(stat, data)
else:
self.info["teamGaming"][tF][hA][stat] = self.score(stat, gameData)
except IndexError:
pass
teamData = [dict(zip(teamStatList, player)) for player in self.league.dbManager.fetchAll(bballTeamStatCmd.format({"gdCmd":gdCmd, "andTS":andTS}))]
playerData = [dict(zip(playerStatList, player)) for player in self.league.dbManager.fetchAll(bballPlayerStatCmd.format({"gdCmd":gdCmd, "andTS":andTS}))]
for label in teamStatList:
self.info["teamStats"][tF][hA][label] = self.teamScore(label, teamData)
for label in playerStatList:
self.info["playerStats"][tF][hA][label] = self.playerScore(label, playerData)
################################################################################
################################################################################
class NCAABReport(Report):
_teamGaming = ("atsW", "atsL", "atsP", "spread", "result", "spreadLine", "moneyLine", "ats$", "money$", "ou", "overLine", "underLine", "total", "over$", "under$")
_info = {
"teamGaming": dict(zip(ENV.tFBasketballChoices, [deepcopy(_awayHomeDict) for _ in ENV.tFBasketballChoices])),
"teamStats": dict(zip(ENV.tFBasketballChoices, [deepcopy(_awayHomeDict) for _ in ENV.tFBasketballChoices])),
"playerStats": dict(zip(ENV.tFBasketballChoices, [deepcopy(_awayHomeDict) for _ in ENV.tFBasketballChoices])),
"leagueId": "ncaab",
"lastUpdate": str(datetime.today()),
}
def __init__(self, league, *args, **kwargs):
super().__init__(league, *args, **kwargs)
def reportData(self):
currentSeason = self.league.fileManager.info["currentSeason"]
gdCmds = getGDCmds(int(currentSeason))
timeFrame = ENV.tFBasketballChoices
teamStatList = ("fga", "fg%", "fta", "ft%", "tpa", "tp%", "pts", "oreb", "dreb", "reb",
"ast", "stl", "blk", "trn", "fls")
playerStatList = ("start%","fga", "fg%", "fta", "ft%", "tpa", "tp%", "pts", "oreb", "reb",
"ast", "stl", "blk", "trn", "fls", "mins")
for tF in timeFrame:
gdCmd = gdCmds[tF]
for hA in ("all", "away", "home"):
andTS = "" if hA == "all" else "AND gd.{}_id = ts.team_id".format(hA)
andGL = "" if hA == "all" else "AND gd.{}_id = gl.team_id".format(hA)
gameData = [dict(zip(self._teamGaming, player)) for player in self.league.dbManager.fetchAll(ncaafTeamGameCmd.format({"gdCmd":gdCmd, "andGL":andGL}))]
for stat in self._teamGaming:
try:
if stat in ("ats$", "money$"):
data = []
for x in gameData:
total = (x["atsW"]+x["atsL"]+x["atsP"])*100
result = x[stat]
data.append({stat:((result-total)/total)*100})
self.info["teamGaming"][tF][hA][stat] = self.score(stat, data)
elif stat in ("over$", "under$"):
data = []
for x in gameData:
total = (x["atsW"]+x["atsL"]+x["atsP"])*100
result = x[stat]
data.append({stat:((result-total)/total)*100})
self.info["teamGaming"][tF][hA][stat] = self.score(stat, data)
else:
self.info["teamGaming"][tF][hA][stat] = self.score(stat, gameData)
except IndexError:
pass
teamData = [dict(zip(teamStatList, player)) for player in self.league.dbManager.fetchAll(ncaaBBallTeamStatCmd.format({"gdCmd":gdCmd, "andTS":andTS}))]
playerData = [dict(zip(playerStatList, player)) for player in self.league.dbManager.fetchAll(ncaabPlayerStatCmd.format({"gdCmd":gdCmd, "andTS":andTS}))]
for label in teamStatList:
# print(label)
self.info["teamStats"][tF][hA][label] = self.teamScore(label, teamData)
for label in playerStatList:
self.info["playerStats"][tF][hA][label] = self.playerScore(label, playerData)
def playerScore(self, stat, data):
values = sorted([x[stat] for x in data if x[stat]])
sDict = {}
sDict[1] = values[int(.99*len(values))]
sDict[2] = values[int(.9*len(values))]
sDict[3] = values[int(.8*len(values))]
sDict[4] = values[int(.7*len(values))]
sDict[5] = values[int(.6*len(values))]
return sDict.copy()
################################################################################
################################################################################
| 47.396568 | 189 | 0.446067 | 5,100 | 49,719 | 4.242745 | 0.064902 | 0.038081 | 0.01368 | 0.016175 | 0.850957 | 0.838248 | 0.819115 | 0.797948 | 0.777752 | 0.763472 | 0 | 0.017853 | 0.409682 | 49,719 | 1,048 | 190 | 47.441794 | 0.719387 | 0.000241 | 0 | 0.740409 | 0 | 0.030691 | 0.53524 | 0.017739 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024297 | false | 0.019182 | 0.008951 | 0 | 0.075448 | 0.005115 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
a5cb5a80c3a5788c4a05d452d151ff487192ef7d | 23,177 | py | Python | main/src/CRCL/FloodCRisisCLassification/topic104Flood.py | beAWARE-project/crisis-classification | 2061a2ee57fd502bd973fdfcffc6d7098049b5ed | ["Apache-2.0"] | null | null | null | main/src/CRCL/FloodCRisisCLassification/topic104Flood.py | beAWARE-project/crisis-classification | 2061a2ee57fd502bd973fdfcffc6d7098049b5ed | ["Apache-2.0"] | 5 | 2018-03-08T16:15:35.000Z | 2018-04-10T14:34:41.000Z | main/src/CRCL/FloodCRisisCLassification/topic104Flood.py | beAWARE-project/crisis-classification | 2061a2ee57fd502bd973fdfcffc6d7098049b5ed | ["Apache-2.0"] | null | null | null |
import json, time, re
import os, errno
from pathlib import Path
from pandas import read_csv, DataFrame, concat, ExcelWriter
from datetime import datetime, timedelta
from math import pow, ceil
from collections import OrderedDict
from CRCL.FloodCRisisCLassification.Topic104_Metric_Report import Top104_Metric_Report
from CRCL.FloodCRisisCLassification.Auxiliary_functions import *
from bus.bus_producer import BusProducer
#-------------------------------------------------------------------------------------------
# Create topic 104 for Water Level Measurement and its category for every River Section
#
def topic104FloodIndex(directory, flag_last_run, response_forecast, max_yValues, meas_color, meas_note,
max_measurementID, max_measurementTimeStamp, dataSeriesID, dataSeriesName, xVals, dataStreamName,
dataStreamID, dataStreamDescript, dates, thresh, riverSections,
RiverSect_CountScale, total_top104, counter, mapRS_df, producer):
# Get the appropriate row of the mapRS_df
# mapRS_df['SensorID'] == riverSections["value"][counter]['@iot.id'])
row_mapRS_df = mapRS_df.index[ mapRS_df['SensorID'] == riverSections["value"][counter]['@iot.id'] ][0]
#print("row_mapRS_df = ", row_mapRS_df, " ID = ", riverSections["value"][counter]['@iot.id'] )
# Set variables for the body of the message
dataStreamGener = "CRCL"
dataStreamName += ['PWLm_Predicted Water Level Measurement']
dataStreamID += ['FLCR_1002_PWLm']
dataStreamName += ['PWLc_Predicted Water Level Category']
dataStreamID += ['FLCR_1102_PWLc']
if flag_last_run:
lastRunID = response_forecast['Datastreams'][0]["properties"]["lastRunId"]
# dataStreamID = str(lastRunID) + "_" + str(datetime.utcnow().microsecond)
dataStreamDescript += ["AMICO predictions of water level in the last run with ID:" + str(lastRunID)]
dataStreamDescript += ["AMICO predictions of water level category in the last run with ID:" + str(lastRunID)]
else:
ObsRunID = response_forecast['Datastreams'][0]['Observations'][0]["parameters"]["runId"]
# dataStreamID = str(ObsRunID) + "_" + str(datetime.utcnow().microsecond)
dataStreamDescript += [
"AMICO predictions of water level in the run with ID:" + str(ObsRunID) + " at dates: " + str(
dates[0]) + " to " + str(dates[1])]
dataStreamDescript += [
"AMICO predictions of water level category in the run with ID:" + str(ObsRunID) + " at dates: " + str(
dates[0]) + " to " + str(dates[1])]
lang = "en-US"
dataStreamCategory = "Met"
dataStreamSubCategory = "Flood"
# Position of the specific river section
#
#position = [round(loc_riverSection[0], 5), round(loc_riverSection[1], 5)]
position = [round(mapRS_df['Long'].iloc[row_mapRS_df], 5),
round(mapRS_df['Lat'].iloc[row_mapRS_df], 5)]
# Set variables for the header of the message
district = "Vicenza"
# Unique message identifier
msgIdent = datetime.utcnow().isoformat().replace(":", "").replace("-", "").replace(".", "MS")
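# e.g. "2019-06-17T10:15:30.123456" becomes "20190617T101530MS123456"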
sent_dateTime = datetime.utcnow().replace(microsecond=0).isoformat() + 'Z'
status = "Actual"
actionType = "Update"
scope = "Public"
code = 20190617001
# Call the class Top104_Metric_Report to create an object data of this class
#
top104_forecast = []
for tit in range(0, 2):
# topic for forecast WL
data = Top104_Metric_Report(msgIdent, sent_dateTime, status, actionType, scope, district, code,
dataStreamGener, dataStreamID[tit], dataStreamName[tit], dataStreamDescript[tit],
lang, dataStreamCategory, dataStreamSubCategory, position)
# Record the thresholds for each river Section in the header note
data.topic_note = "Threshold_1=" + str(thresh[0]) + ", " + "Threshold_2=" + str(
thresh[1]) + ", " + "Threshold_3=" + str(thresh[2])
# create the header of the object
data.create_dictHeader()
# create the measurements of the object
#
# topic for forecast WL
data.topic_yValue = [max_yValues[tit]]
data.topic_measurementID = [max_measurementID[tit]]
data.topic_measurementTimeStamp = [max_measurementTimeStamp[tit]]
#data.topic_dataSeriesID = [dataSeriesID[tit]]
#data.topic_dataSeriesName = [dataSeriesName[tit]]
data.topic_dataSeriesID = [mapRS_df['DataSeriesID'].iloc[row_mapRS_df]]
data.topic_dataSeriesName = [mapRS_df['DataSeriesName'].iloc[row_mapRS_df]]
data.topic_xValue = [xVals[tit]]
data.topic_meas_color = [meas_color[tit]]
data.topic_meas_note = [meas_note[tit]]
# call class function
data.create_dictMeasurements()
# create the body of the object
data.create_dictBody()
# create the TOP104_METRIC_REPORT as json for WL forecasts
top104_item = OrderedDict()
top104_item['header'] = data.header
top104_item['body'] = data.body
# write json (top104_item) to output file
if tit == 0:
flname = directory + "/" + 'TOP104_forecasts_WL_' + riverSections["value"][counter]['name'].replace(" ",
"") + ".txt"
else:
flname = directory + "/" + 'TOP104_forecasts_WL_Category_' + riverSections["value"][counter][
'name'].replace(" ", "") + ".txt"
with open(flname, 'w') as outfile:
json.dump(top104_item, outfile, indent=4)
top104_forecast += [top104_item]
if len(top104_forecast) != 0:
print('Send message: Max Predicted Water Level value and its Category have been forwarded to the logger in 2 separate messages!')
for it in range(len(top104_forecast)):
producer.send("TOP104_METRIC_REPORT", top104_forecast[it])
total_top104 = total_top104 + 1
print( "total_top104 = ", total_top104)
print("\n ***** TOPIC: ")
print(top104_forecast[it])
print("*******\n")
else:
print('No messages will be forwarded to the logger!')
return total_top104
#-------------------------------------------------------------------------------------------
# Create topic 104 for Water Level Measurement and its category for every River Section
#
def topic104FloodIndex_VER14(directory, flag_last_run, response_forecast, max_yValues, meas_color, meas_note,
max_measurementID, max_measurementTimeStamp, dataSeriesID, dataSeriesName, xVals, dataStreamName,
dataStreamID, dataStreamDescript, dates, thresh, riverSections,
RiverSect_CountScale, counter, mapRS_df):
# Get the appropriate row of the mapRS_df
# mapRS_df['SensorID'] == riverSections["value"][counter]['@iot.id'])
row_mapRS_df = mapRS_df.index[ mapRS_df['SensorID'] == riverSections["value"][counter]['@iot.id'] ][0]
#print("row_mapRS_df = ", row_mapRS_df, " ID = ", riverSections["value"][counter]['@iot.id'] )
# Set variables for the body of the message
dataStreamGener = "CRCL"
dataStreamName += ['PWLm_Predicted Water Level Measurement']
dataStreamID += ['FLCR_1002_PWLm']
dataStreamName += ['PWLc_Predicted Water Level Category']
dataStreamID += ['FLCR_1102_PWLc']
if flag_last_run:
lastRunID = response_forecast['Datastreams'][0]["properties"]["lastRunId"]
# dataStreamID = str(lastRunID) + "_" + str(datetime.utcnow().microsecond)
dataStreamDescript += ["AMICO predictions of water level in the last run with ID:" + str(lastRunID)]
dataStreamDescript += ["AMICO predictions of water level category in the last run with ID:" + str(lastRunID)]
else:
ObsRunID = response_forecast['Datastreams'][0]['Observations'][0]["parameters"]["runId"]
# dataStreamID = str(ObsRunID) + "_" + str(datetime.utcnow().microsecond)
dataStreamDescript += [
"AMICO predictions of water level in the run with ID:" + str(ObsRunID) + " at dates: " + str(
dates[0]) + " to " + str(dates[1])]
dataStreamDescript += [
"AMICO predictions of water level category in the run with ID:" + str(ObsRunID) + " at dates: " + str(
dates[0]) + " to " + str(dates[1])]
lang = "en-US"
dataStreamCategory = "Met"
dataStreamSubCategory = "Flood"
# Position of the specific river section
#
#position = [round(loc_riverSection[0], 5), round(loc_riverSection[1], 5)]
position = [round(mapRS_df['Long'].iloc[row_mapRS_df], 5),
round(mapRS_df['Lat'].iloc[row_mapRS_df], 5)]
# Set variables for the header of the message
district = "Vicenza"
# Unique message identifier
msgIdent = datetime.utcnow().isoformat().replace(":", "").replace("-", "").replace(".", "MS")
sent_dateTime = datetime.utcnow().replace(microsecond=0).isoformat() + 'Z'
status = "Actual"
actionType = "Update"
scope = "Public"
code = 20190617001
# Call the class Top104_Metric_Report to create an object data of this class
#
top104_forecast = []
for tit in range(0, 2):
# topic for forecast WL
data = Top104_Metric_Report(msgIdent, sent_dateTime, status, actionType, scope, district, code,
dataStreamGener, dataStreamID[tit], dataStreamName[tit], dataStreamDescript[tit],
lang, dataStreamCategory, dataStreamSubCategory, position)
# Record the thresholds for each river Section in the header note
data.topic_note = "Threshold_1=" + str(thresh[0]) + ", " + "Threshold_2=" + str(
thresh[1]) + ", " + "Threshold_3=" + str(thresh[2])
# create the header of the object
data.create_dictHeader()
# create the measurements of the object
#
# topic for forecast WL
data.topic_yValue = [max_yValues[tit]]
data.topic_measurementID = [max_measurementID[tit]]
data.topic_measurementTimeStamp = [max_measurementTimeStamp[tit]]
#data.topic_dataSeriesID = [dataSeriesID[tit]]
#data.topic_dataSeriesName = [dataSeriesName[tit]]
data.topic_dataSeriesID = [mapRS_df['DataSeriesID'].iloc[row_mapRS_df]]
data.topic_dataSeriesName = [mapRS_df['DataSeriesName'].iloc[row_mapRS_df]]
data.topic_xValue = [xVals[tit]]
data.topic_meas_color = [meas_color[tit]]
data.topic_meas_note = [meas_note[tit]]
# call class function
data.create_dictMeasurements()
# create the body of the object
data.create_dictBody()
# create the TOP104_METRIC_REPORT as json for WL forecasts
top104_item = OrderedDict()
top104_item['header'] = data.header
top104_item['body'] = data.body
# write json (top104_item) to output file
if tit == 0:
flname = directory + "/" + 'TOP104_forecasts_WL_' + riverSections["value"][counter]['name'].replace(" ",
"") + ".txt"
else:
flname = directory + "/" + 'TOP104_forecasts_WL_Category_' + riverSections["value"][counter][
'name'].replace(" ", "") + ".txt"
with open(flname, 'w') as outfile:
json.dump(top104_item, outfile, indent=4)
top104_forecast += [top104_item]
return top104_forecast
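#note: topic104FloodIndex_VER14 builds the same two topics as
#topic104FloodIndex but returns them instead of publishing via the bus producer.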
#------------------------------------------------------------------------------------------------
# Create topic 104 for Water Level Measurement and its category for every CRITICAL River Section
# DataStream name:
#
def topic104FloodIndex_critical(directory, flag_last_run, response_forecast, max_yValues, meas_color, meas_note,
max_measurementID, max_measurementTimeStamp, dataSeriesID, dataSeriesName, xVals, dataStreamName,
dataStreamID, dataStreamDescript, dates, thresh, riverSections,
RiverSect_CountScale, counter, mapRS_df):
# Get the appropriate row of the mapRS_df
# mapRS_df['SensorID'] == riverSections["value"][counter]['@iot.id'])
row_mapRS_df = mapRS_df.index[ mapRS_df['SensorID'] == riverSections["value"][counter]['@iot.id'] ][0]
#print("row_mapRS_df = ", row_mapRS_df, " ID = ", riverSections["value"][counter]['@iot.id'] )
# Set variables for the body of the message
dataStreamGener = "CRCL"
dataStreamName += ['PWLm_Predicted Water Level for Critical Sections']
dataStreamID += ['FLCR_1032_CPWLm']
dataStreamName += ['PWLc_Predicted Water Level Category for Critical Sections']
dataStreamID += ['FLCR_1132_CPWLc']
if flag_last_run:
lastRunID = response_forecast['Datastreams'][0]["properties"]["lastRunId"]
# dataStreamID = str(lastRunID) + "_" + str(datetime.utcnow().microsecond)
dataStreamDescript += ["AMICO predictions of water level in the last run with ID:" + str(lastRunID)]
dataStreamDescript += ["AMICO predictions of water level category in the last run with ID:" + str(lastRunID)]
else:
ObsRunID = response_forecast['Datastreams'][0]['Observations'][0]["parameters"]["runId"]
# dataStreamID = str(ObsRunID) + "_" + str(datetime.utcnow().microsecond)
dataStreamDescript += [
"AMICO predictions of water level in the run with ID:" + str(ObsRunID) + " at dates: " + str(
dates[0]) + " to " + str(dates[1])]
dataStreamDescript += [
"AMICO predictions of water level category in the run with ID:" + str(ObsRunID) + " at dates: " + str(
dates[0]) + " to " + str(dates[1])]
lang = "en-US"
dataStreamCategory = "Met"
dataStreamSubCategory = "Flood"
# Position of the specific river section
#
#position = [round(loc_riverSection[0], 5), round(loc_riverSection[1], 5)]
position = [round(mapRS_df['Long'].iloc[row_mapRS_df], 5),
round(mapRS_df['Lat'].iloc[row_mapRS_df], 5)]
# Set variables for the header of the message
district = "Vicenza"
# Unique message identifier
msgIdent = datetime.utcnow().isoformat().replace(":", "").replace("-", "").replace(".", "MS")
sent_dateTime = datetime.utcnow().replace(microsecond=0).isoformat() + 'Z'
status = "Actual"
actionType = "Update"
scope = "Public"
code = 20190617001
# Instantiate the Top104_Metric_Report class to create one data object per topic
#
top104_forecast_critical = []
for tit in range(0, 2):
# topic for forecast WL
data = Top104_Metric_Report(msgIdent, sent_dateTime, status, actionType, scope, district, code,
dataStreamGener, dataStreamID[tit], dataStreamName[tit], dataStreamDescript[tit],
lang, dataStreamCategory, dataStreamSubCategory, position)
# Record the thresholds for each river Section in the header note
data.topic_note = "Threshold_1=" + str(thresh[0]) + ", " + "Threshold_2=" + str(
thresh[1]) + ", " + "Threshold_3=" + str(thresh[2])
# create the header of the object
data.create_dictHeader()
# create the measurements of the object
#
# topic for forecast WL
data.topic_yValue = [max_yValues[tit]]
data.topic_measurementID = [max_measurementID[tit]]
data.topic_measurementTimeStamp = [max_measurementTimeStamp[tit]]
#data.topic_dataSeriesID = [dataSeriesID[tit]]
#data.topic_dataSeriesName = [dataSeriesName[tit]]
data.topic_dataSeriesID = [mapRS_df['DataSeriesID'].iloc[row_mapRS_df]]
data.topic_dataSeriesName = [mapRS_df['DataSeriesName'].iloc[row_mapRS_df]]
data.topic_xValue = [xVals[tit]]
data.topic_meas_color = [meas_color[tit]]
data.topic_meas_note = [meas_note[tit]]
# call class function
data.create_dictMeasurements()
# create the body of the object
data.create_dictBody()
# create the TOP104_METRIC_REPORT as json for WL forecasts
top104_item = OrderedDict()
top104_item['header'] = data.header
top104_item['body'] = data.body
# write json (top104_item) to output file
if tit == 0:
flname = directory + "/" + 'CRITICAL_TOP104_forecasts_WL' + '_' + riverSections["value"][counter]['name'].replace(" ","") + ".txt"
else:
flname = directory + "/" + 'CRITICAL_TOP104_forecasts_WL_Category' + '_' + riverSections["value"][counter][
'name'].replace(" ", "") + ".txt"
with open(flname, 'w') as outfile:
json.dump(top104_item, outfile, indent=4)
top104_forecast_critical += [top104_item]
return top104_forecast_critical
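#------------------------------------------------------------------------------------------------
# Hedged aside (not part of the original module): what the msgIdent recipe used in the
# functions above actually produces -- an ISO-8601 timestamp with ':' and '-' stripped
# and the fractional-second dot replaced by the literal "MS".
from datetime import datetime as _dt_example
_stamp = _dt_example(2019, 6, 17, 10, 30, 15, 123456)
_msg_ident = _stamp.isoformat().replace(":", "").replace("-", "").replace(".", "MS")
assert _msg_ident == "20190617T103015MS123456"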
#------------------------------------------------------------------------------------
# Create Topic104 for the Predicted Flood Crisis Level per group of river sections
# and the whole region of interest
#
def topic104FloodOverall(directory, RiverSect_CountScale, OCL, total_top104_index, producer):
# Set variables for the body of the message
dataStreamGener = "CRCL"
dataStreamName = "PFLCL_Predicted Flood Crisis Level by Group of River Sections"
dataStreamID = "FLCR_1021_PCL"
lang = "en-US"
dataStreamCategory = "Met"
dataStreamSubCategory = "Flood"
# Create topics for each group of river sections
for it in range(len(OCL) - 1):
grID = it + 1
#dataStreamID = "1021" #+ str(grID)
#dataStreamDescript = "Estimation of the Flood Crisis Level in the pre-emergency phase for the " + \
# RiverSect_CountScale[it]['name'] + " of river sections"
dataStreamDescript = RiverSect_CountScale[it]['descr']
# Position of the center of group
position = [round(RiverSect_CountScale[it]['group_center_pos'][0], 5),
round(RiverSect_CountScale[it]['group_center_pos'][1], 5)]
# Set variables for the header of the message
district = "Vicenza"
# Unique message identifier
msgIdent = datetime.utcnow().isoformat().replace(":", "").replace("-", "").replace(".", "MS")
sent_dateTime = datetime.utcnow().replace(microsecond=0).isoformat() + 'Z'
status = "Actual"
actionType = "Update"
scope = "Public"
code = 20190617001
group_ocl_msg = Top104_Metric_Report(msgIdent, sent_dateTime, status, actionType, scope, district, code,
dataStreamGener, dataStreamID, dataStreamName, dataStreamDescript,
lang, dataStreamCategory, dataStreamSubCategory, position)
# create the header of the object
group_ocl_msg.create_dictHeader()
# create the measurements of the object
#
#group_ocl_msg.topic_yValue = [OCL[it]['ocl']]
group_ocl_msg.topic_yValue = [OCL[it]['ocl_val']]
group_ocl_msg.topic_measurementID = ['OCL_ID_1001' + str(it)]
group_ocl_msg.topic_measurementTimeStamp = [sent_dateTime]
group_ocl_msg.topic_dataSeriesID = ['RS_OCL_ID_1001' + str(it)]
group_ocl_msg.topic_dataSeriesName = [OCL[it]['name']]
group_ocl_msg.topic_xValue = [sent_dateTime]
group_ocl_msg.topic_meas_color = [OCL[it]['color']]
group_ocl_msg.topic_meas_note = [OCL[it]['note']]
# call class function
group_ocl_msg.create_dictMeasurements()
# create the body of the object
group_ocl_msg.create_dictBody()
# create the TOP104_METRIC_REPORT as json
top104_group_ocl = OrderedDict()
top104_group_ocl['header'] = group_ocl_msg.header
top104_group_ocl['body'] = group_ocl_msg.body
# write json (top104_group_ocl) to output file
flname = directory + "/" + "TOP104_PreAlert_Overall_Crisis_Level_Group_" + str(grID) + ".txt"
with open(flname, 'w') as outfile:
json.dump(top104_group_ocl, outfile, indent=4)
# Send messages to PSAP
print('Message sent: Overall Crisis Level has been forwarded to the logger!')
producer.send("TOP104_METRIC_REPORT", top104_group_ocl)
total_top104_index = total_top104_index + 1
# ----------------------------------------------
# Create topic for the whole Region of Interest
dataStreamName = "PFLCL_Predicted Flood Crisis Level Overall"
dataStreamID = "FLCR_1001_PCL"
dataStreamDescript = "Estimation of the Flood Crisis Level in the pre-emergency phase for all rivers in the Municipality/ Tutti I Corsi d’acqua nel Comune"
# Position of the center of Vicenza region
position = ["11.53885", "45.54497"]
# Set variables for the header of the message
district = "Vicenza"
# Unique message identifier
msgIdent = datetime.utcnow().isoformat().replace(":", "").replace("-", "").replace(".", "MS")
sent_dateTime = datetime.utcnow().replace(microsecond=0).isoformat() + 'Z'
status = "Actual"
actionType = "Update"
scope = "Public"
code = 20190617001
ocl_msg = Top104_Metric_Report(msgIdent, sent_dateTime, status, actionType, scope, district, code,
dataStreamGener, dataStreamID, dataStreamName, dataStreamDescript,
lang, dataStreamCategory, dataStreamSubCategory, position)
# create the header of the object
ocl_msg.create_dictHeader()
# create the measurements of the object
#
len_ocl = len(OCL)
#ocl_msg.topic_yValue = [OCL[len_ocl - 1]['ocl']]
ocl_msg.topic_yValue = [OCL[len_ocl - 1]['ocl_val']]
ocl_msg.topic_measurementID = ['OCL_ID_1001']
ocl_msg.topic_measurementTimeStamp = [sent_dateTime]
ocl_msg.topic_dataSeriesID = ['RS_OCL_ID_1001']
ocl_msg.topic_dataSeriesName = [OCL[len_ocl - 1]['name']]
ocl_msg.topic_xValue = [sent_dateTime]
ocl_msg.topic_meas_color = [OCL[len_ocl - 1]['color']]
ocl_msg.topic_meas_note = [OCL[len_ocl - 1]['note']]
# call class function
ocl_msg.create_dictMeasurements()
# create the body of the object
ocl_msg.create_dictBody()
# create the TOP104_METRIC_REPORT as json
top104_ocl = OrderedDict()
top104_ocl['header'] = ocl_msg.header
top104_ocl['body'] = ocl_msg.body
# write json (top104_ocl) to output file
flname = directory + "/" + "TOP104_PreAlert_Overall_Crisis_Level.txt"
with open(flname, 'w') as outfile:
json.dump(top104_ocl, outfile, indent=4)
# Send messages to PSAP
print('Message sent: Overall Crisis Level has been forwarded to the logger!')
producer.send("TOP104_METRIC_REPORT", top104_ocl)
total_top104_index = total_top104_index + 1
return total_top104_index
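#------------------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the pilot code): one plausible
# way to drive topic104FloodOverall. The broker address and every value in OCL and
# RiverSect_CountScale below are placeholder assumptions; the last OCL entry is the
# overall level, matching the len(OCL) - 1 group loop above.
if __name__ == '__main__':
    from kafka import KafkaProducer
    producer = KafkaProducer(bootstrap_servers='localhost:9092',
                             value_serializer=lambda v: json.dumps(v).encode('utf-8'))
    OCL = [{'ocl_val': 2, 'name': 'Group_1_OCL', 'color': '#FFFF00', 'note': 'Medium'},
           {'ocl_val': 3, 'name': 'Overall_OCL', 'color': '#FF0000', 'note': 'High'}]
    RiverSect_CountScale = [{'descr': 'Crisis level for the first group of river sections',
                             'group_center_pos': (11.53885, 45.54497)}]
    sent = topic104FloodOverall('.', RiverSect_CountScale, OCL, 0, producer)
    print(sent, 'TOP104 messages sent')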
| 44.571154
| 159
| 0.628899
| 2,550
| 23,177
| 5.530588
| 0.101176
| 0.023825
| 0.01489
| 0.030632
| 0.886833
| 0.86705
| 0.834078
| 0.819755
| 0.806211
| 0.801957
| 0
| 0.027438
| 0.24205
| 23,177
| 519
| 160
| 44.657033
| 0.775374
| 0.220218
| 0
| 0.707483
| 0
| 0.006803
| 0.168793
| 0.011476
| 0.010204
| 0
| 0
| 0
| 0
| 1
| 0.013605
| false
| 0
| 0.034014
| 0
| 0.061224
| 0.027211
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
194be4249997dc7089b5e2991030642967a8e223
| 4,789
|
py
|
Python
|
tests/contexts/tests.py
|
josemarimanio/django-adminlte2-templates
|
d39ab5eaec674c4725015fe43fc93e74dce78a6e
|
[
"MIT"
] | 10
|
2020-03-21T10:50:11.000Z
|
2022-03-04T08:36:43.000Z
|
tests/contexts/tests.py
|
josemarimanio/django-adminlte2-templates
|
d39ab5eaec674c4725015fe43fc93e74dce78a6e
|
[
"MIT"
] | 6
|
2020-06-06T08:48:29.000Z
|
2021-06-10T18:49:35.000Z
|
tests/contexts/tests.py
|
josemarimanio/django-adminlte2-templates
|
d39ab5eaec674c4725015fe43fc93e74dce78a6e
|
[
"MIT"
] | 1
|
2021-09-14T02:00:43.000Z
|
2021-09-14T02:00:43.000Z
|
from django.test import Client
from django.test import SimpleTestCase
from adminlte2_templates.core import reverse
class ContextTestCase(SimpleTestCase):
def setUp(self):
self.client = Client()
def context_exists(self, context):
# Get view from 'layouts' unit test
response = self.client.get(reverse('layouts:default_boxed'))
try:
return response.context[context] is not None
except KeyError:
return False
def test_debug_context(self):
self.assertTrue(self.context_exists('DEBUG'))
def test_html_lang_context(self):
self.assertTrue(self.context_exists('ADMINLTE_HTML_LANG'))
def test_html_lang_bidi_context(self):
self.assertTrue(self.context_exists('ADMINLTE_HTML_LANG_BIDI'))
def test_skin_style_context(self):
self.assertTrue(self.context_exists('ADMINLTE_SKIN_STYLE'))
def test_control_style_context(self):
self.assertTrue(self.context_exists('ADMINLTE_CONTROL_STYLE'))
def test_footer_version_context(self):
self.assertTrue(self.context_exists('ADMINLTE_FOOTER_VERSION'))
def test_use_shim_context(self):
self.assertTrue(self.context_exists('ADMINLTE_USE_SHIM'))
def test_use_cdn_context(self):
self.assertTrue(self.context_exists('ADMINLTE_USE_CDN'))
def test_use_cdn_context_true(self):
with self.settings(ADMINLTE_USE_CDN=True):
self.assertTrue(self.context_exists('ADMINLTE_CDN_ADMINLTE_CSS_CORE'))
self.assertTrue(self.context_exists('ADMINLTE_CDN_ADMINLTE_CSS_SKIN'))
self.assertTrue(self.context_exists('ADMINLTE_CDN_ADMINLTE_JS_CORE'))
self.assertTrue(self.context_exists('ADMINLTE_CDN_BOOTSTRAP_CSS_CORE'))
self.assertTrue(self.context_exists('ADMINLTE_CDN_BOOTSTRAP_JS_CORE'))
self.assertTrue(self.context_exists('ADMINLTE_CDN_JQUERY_JS_CORE'))
def test_use_cdn_context_false(self):
with self.settings(ADMINLTE_USE_CDN=False):
self.assertFalse(self.context_exists('ADMINLTE_CDN_ADMINLTE_CSS_CORE'))
self.assertFalse(self.context_exists('ADMINLTE_CDN_ADMINLTE_CSS_SKIN'))
self.assertFalse(self.context_exists('ADMINLTE_CDN_ADMINLTE_JS_CORE'))
self.assertFalse(self.context_exists('ADMINLTE_CDN_BOOTSTRAP_CSS_CORE'))
self.assertFalse(self.context_exists('ADMINLTE_CDN_BOOTSTRAP_JS_CORE'))
self.assertFalse(self.context_exists('ADMINLTE_CDN_JQUERY_JS_CORE'))
# Shims
def test_use_cdn_and_use_shim_context_true(self):
with self.settings(ADMINLTE_USE_CDN=True, ADMINLTE_USE_SHIM=True):
self.assertTrue(self.context_exists('ADMINLTE_CDN_HTML5SHIV_JS_CORE'))
self.assertTrue(self.context_exists('ADMINLTE_CDN_RESPOND_JS_CORE'))
def test_use_cdn_and_use_shim_context_false(self):
with self.settings(ADMINLTE_USE_CDN=True, ADMINLTE_USE_SHIM=False):
self.assertFalse(self.context_exists('ADMINLTE_CDN_HTML5SHIV_JS_CORE'))
self.assertFalse(self.context_exists('ADMINLTE_CDN_RESPOND_JS_CORE'))
# DataTables
def test_use_cdn_and_enable_datatables_context_true(self):
with self.settings(ADMINLTE_USE_CDN=True, ADMINLTE_STATIC_ENABLE_DATATABLES=True):
self.assertTrue(self.context_exists('ADMINLTE_CDN_DATATABLES_CSS_CORE'))
self.assertTrue(self.context_exists('ADMINLTE_CDN_DATATABLES_JS_CORE'))
def test_use_cdn_and_enable_datatables_context_false(self):
with self.settings(ADMINLTE_USE_CDN=True, ADMINLTE_STATIC_ENABLE_DATATABLES=False):
self.assertFalse(self.context_exists('ADMINLTE_CDN_DATATABLES_CSS_CORE'))
self.assertFalse(self.context_exists('ADMINLTE_CDN_DATATABLES_JS_CORE'))
# Font Awesome
def test_use_cdn_and_enable_fontawesome_context_true(self):
with self.settings(ADMINLTE_USE_CDN=True, ADMINLTE_STATIC_ENABLE_FONTAWESOME=True):
self.assertTrue(self.context_exists('ADMINLTE_CDN_FONTAWESOME_CSS_CORE'))
def test_use_cdn_and_enable_fontawesome_context_false(self):
with self.settings(ADMINLTE_USE_CDN=True, ADMINLTE_STATIC_ENABLE_FONTAWESOME=False):
self.assertFalse(self.context_exists('ADMINLTE_CDN_FONTAWESOME_CSS_CORE'))
# Select2
def test_use_cdn_and_enable_select2_context_true(self):
with self.settings(ADMINLTE_USE_CDN=True, ADMINLTE_STATIC_ENABLE_SELECT2=True):
self.assertTrue(self.context_exists('ADMINLTE_CDN_SELECT2_CSS_CORE'))
def test_use_cdn_and_enable_select2_context_false(self):
with self.settings(ADMINLTE_USE_CDN=True, ADMINLTE_STATIC_ENABLE_SELECT2=False):
self.assertFalse(self.context_exists('ADMINLTE_CDN_SELECT2_CSS_CORE'))
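# Hedged addendum (not part of the original suite): the settings overrides above can
# equivalently be written with the override_settings decorator; inheriting from
# ContextTestCase reuses its setUp and context_exists helper.
from django.test import override_settings

class ContextDecoratorExampleTestCase(ContextTestCase):
    @override_settings(ADMINLTE_USE_CDN=True, ADMINLTE_STATIC_ENABLE_SELECT2=True)
    def test_select2_cdn_with_decorator(self):
        self.assertTrue(self.context_exists('ADMINLTE_CDN_SELECT2_CSS_CORE'))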
| 47.89
| 92
| 0.752767
| 614
| 4,789
| 5.421824
| 0.105863
| 0.128868
| 0.163412
| 0.232803
| 0.845599
| 0.837789
| 0.833884
| 0.821268
| 0.604986
| 0.2304
| 0
| 0.002488
| 0.160576
| 4,789
| 99
| 93
| 48.373737
| 0.825622
| 0.016496
| 0
| 0
| 0
| 0
| 0.187925
| 0.171981
| 0
| 0
| 0
| 0
| 0.444444
| 1
| 0.277778
| false
| 0
| 0.041667
| 0
| 0.361111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1955c192cb5f222fba0b47ccf2ae670f869d6c71
| 1,267
|
py
|
Python
|
A2C-Reinforce/agents.py
|
onimaru/Reinforcement_Learning
|
4c45b51a095cb0cb3c18f6a1542befdcab8a58a4
|
[
"MIT"
] | 1
|
2020-12-11T19:02:13.000Z
|
2020-12-11T19:02:13.000Z
|
A2C-Reinforce/agents.py
|
onimaru/Reinforcement_Learning
|
4c45b51a095cb0cb3c18f6a1542befdcab8a58a4
|
[
"MIT"
] | null | null | null |
A2C-Reinforce/agents.py
|
onimaru/Reinforcement_Learning
|
4c45b51a095cb0cb3c18f6a1542befdcab8a58a4
|
[
"MIT"
] | 3
|
2020-12-11T19:03:36.000Z
|
2022-02-27T20:28:24.000Z
|
import torch.nn as nn
class AgentA2C(nn.Module):
def __init__(self, state_shape, n_actions):
super().__init__()
self.name = 'a2c'
self.n_actions = n_actions
self.state_shape = state_shape
self.hidden1 = nn.Linear(self.state_shape, 100)
self.act1 = nn.ReLU()
self.hidden2 = nn.Linear(100, 100)
self.act2 = nn.ReLU()
self.out1 = nn.Linear(100, self.n_actions)
self.out2 = nn.Linear(100, 1)
def forward(self, state_t):
h = self.act1(self.hidden1(state_t))
h = self.act2(self.hidden2(h))
logits = self.out1(h)
value = self.out2(h)
return logits, value
class AgentReinforce(nn.Module):
def __init__(self, state_shape, n_actions):
super().__init__()
self.name = 'reinforce'
self.n_actions = n_actions
self.state_shape = state_shape
self.hidden1 = nn.Linear(self.state_shape, 100)
self.act1 = nn.ReLU()
self.hidden2 = nn.Linear(100, 100)
self.act2 = nn.ReLU()
self.out1 = nn.Linear(100, self.n_actions)
def forward(self, state_t):
h = self.act1(self.hidden1(state_t))
h = self.act2(self.hidden2(h))
logits = self.out1(h)
return logits
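# Hedged usage sketch (not part of the original file): sampling one action from
# AgentA2C on a dummy 4-dimensional state; the shapes are assumptions, not a fixed API.
if __name__ == '__main__':
    import torch
    agent = AgentA2C(state_shape=4, n_actions=2)
    state = torch.randn(1, 4)                      # batch of one observation
    logits, value = agent(state)
    probs = torch.softmax(logits, dim=-1)          # policy distribution over actions
    action = torch.multinomial(probs, num_samples=1)
    print('action:', action.item(), 'state value:', value.item())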
| 31.675
| 55
| 0.599842
| 176
| 1,267
| 4.113636
| 0.198864
| 0.099448
| 0.116022
| 0.060773
| 0.828729
| 0.828729
| 0.828729
| 0.828729
| 0.828729
| 0.828729
| 0
| 0.056522
| 0.273875
| 1,267
| 39
| 56
| 32.487179
| 0.730435
| 0
| 0
| 0.742857
| 0
| 0
| 0.009471
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0.028571
| 0
| 0.257143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
19639764da9c17f87e2123f52569720a0e1b0270
| 6,393
|
py
|
Python
|
gans/models/generators/latent_to_image/conditional_latent_to_image.py
|
tlatkowski/gans-2.0
|
974efc5bbcea39c0a7dec9405ba4514ada6dc39c
|
[
"MIT"
] | 78
|
2019-09-25T15:09:18.000Z
|
2022-02-09T09:56:15.000Z
|
gans/models/generators/latent_to_image/conditional_latent_to_image.py
|
tlatkowski/gans-2.0
|
974efc5bbcea39c0a7dec9405ba4514ada6dc39c
|
[
"MIT"
] | 23
|
2019-10-09T21:24:39.000Z
|
2022-03-12T00:00:53.000Z
|
gans/models/generators/latent_to_image/conditional_latent_to_image.py
|
tlatkowski/gans-2.0
|
974efc5bbcea39c0a7dec9405ba4514ada6dc39c
|
[
"MIT"
] | 18
|
2020-01-24T13:13:57.000Z
|
2022-02-15T18:58:12.000Z
|
from easydict import EasyDict as edict
from tensorflow.python.keras import Input
from tensorflow.python.keras import Model
from tensorflow.python.keras import layers
from gans.models import model
class LatentToImageConditionalGenerator(model.Model):
def __init__(
self,
model_parameters: edict,
):
super().__init__(model_parameters)
def define_model(self):
z = Input(shape=[self.model_parameters.latent_size])
class_id = Input(shape=[1])
embedded_id = layers.Embedding(input_dim=10, output_dim=50)(class_id)
embedded_id = layers.Dense(units=7 * 7)(embedded_id)
embedded_id = layers.Reshape(target_shape=(7, 7, 1))(embedded_id)
x = layers.Dense(units=7 * 7 * 256, use_bias=False)(z)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
x = layers.Reshape((7, 7, 256))(x)
inputs = layers.Concatenate(axis=3)([x, embedded_id])
x = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)(inputs)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)(x)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')(x)
model = Model(name=self.model_name, inputs=[z, class_id], outputs=x)
return model
class LatentToImageCifar10CConditionalGenerator(model.Model):
def __init__(
self,
model_parameters: edict,
):
super().__init__(model_parameters)
def define_model(self):
z = Input(shape=[self.model_parameters.latent_size])
class_id = Input(shape=[1])
embedded_id = layers.Embedding(input_dim=10, output_dim=50)(class_id)
embedded_id = layers.Dense(units=8 * 8)(embedded_id)
embedded_id = layers.Reshape(target_shape=(8, 8, 1))(embedded_id)
x = layers.Dense(units=8 * 8 * 256, use_bias=False)(z)
x = layers.BatchNormalization(momentum=0.9)(x)
x = layers.LeakyReLU(alpha=0.1)(x)
x = layers.Reshape((8, 8, 256))(x)
inputs = layers.Concatenate(axis=3)([x, embedded_id])
x = layers.Conv2DTranspose(128, kernel_size=(4, 4), strides=(2, 2), padding='same', use_bias=False)(inputs)
x = layers.BatchNormalization(momentum=0.9)(x)
x = layers.LeakyReLU(alpha=0.1)(x)
x = layers.Conv2D(128, kernel_size=(5, 5), strides=(1, 1), padding='same', use_bias=False)(x)
x = layers.BatchNormalization(momentum=0.9)(x)
x = layers.LeakyReLU(alpha=0.1)(x)
x = layers.Conv2DTranspose(128, kernel_size=(4, 4), strides=(2, 2), padding='same', use_bias=False)(x)
x = layers.BatchNormalization(momentum=0.9)(x)
x = layers.LeakyReLU(alpha=0.1)(x)
x = layers.Conv2D(128, kernel_size=(5, 5), strides=(1, 1), padding='same', use_bias=False)(x)
x = layers.BatchNormalization(momentum=0.9)(x)
x = layers.LeakyReLU(alpha=0.1)(x)
x = layers.Conv2D(128, kernel_size=(5, 5), strides=(1, 1), padding='same', use_bias=False)(x)
x = layers.BatchNormalization(momentum=0.9)(x)
x = layers.LeakyReLU(alpha=0.1)(x)
x = layers.Conv2D(3, kernel_size=(5, 5), strides=(1, 1), padding='same', use_bias=False, activation='tanh')(x)
model = Model(name=self.model_name, inputs=[z, class_id], outputs=x)
return model
class LatentToImageNNUpsamplingCifar10CConditionalGenerator(model.Model):
def __init__(
self,
model_parameters: edict,
):
super().__init__(model_parameters)
def define_model(self):
z = Input(shape=[self.model_parameters.latent_size])
class_id = Input(shape=[1])
embedded_id = layers.Embedding(input_dim=10, output_dim=50)(class_id)
embedded_id = layers.Dense(units=8 * 8)(embedded_id)
embedded_id = layers.Reshape(target_shape=(8, 8, 1))(embedded_id)
x = layers.Dense(units=8 * 8 * 256, use_bias=False)(z)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
x = layers.Reshape((8, 8, 256))(x)
inputs = layers.Concatenate(axis=3)([x, embedded_id])
x = layers.Conv2D(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)(inputs)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
x = layers.UpSampling2D()(x)
x = layers.Conv2D(64, (5, 5), strides=(1, 1), padding='same', use_bias=False)(x)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
x = layers.UpSampling2D()(x)
x = layers.Conv2D(3, (5, 5), strides=(1, 1), padding='same', use_bias=False, activation='tanh')(x)
model = Model(name=self.model_name, inputs=[z, class_id], outputs=x)
return model
class LatentToImageNNUpSamplingConditionalGenerator(model.Model):
def __init__(
self,
model_parameters: edict,
):
super().__init__(model_parameters)
def define_model(self):
z = Input(shape=[self.model_parameters.latent_size])
class_id = Input(shape=[1])
embedded_id = layers.Embedding(input_dim=10, output_dim=50)(class_id)
embedded_id = layers.Dense(units=7 * 7)(embedded_id)
embedded_id = layers.Reshape(target_shape=(7, 7, 1))(embedded_id)
x = layers.Dense(units=7 * 7 * 256, use_bias=False)(z)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
x = layers.Reshape((7, 7, 256))(x)
inputs = layers.Concatenate(axis=3)([x, embedded_id])
x = layers.Conv2D(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)(inputs)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
x = layers.UpSampling2D()(x)
x = layers.Conv2D(64, (5, 5), strides=(1, 1), padding='same', use_bias=False)(x)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
x = layers.UpSampling2D()(x)
x = layers.Conv2D(1, (5, 5), strides=(1, 1), padding='same', use_bias=False, activation='tanh')(x)
model = Model(name=self.model_name, inputs=[z, class_id], outputs=x)
return model
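# Hedged illustration (assumptions only): the label-conditioning step shared by every
# generator above, isolated -- embed the class id, project it onto a one-channel
# feature map, and concatenate it with the reshaped latent features.
if __name__ == '__main__':
    z = Input(shape=[100])
    class_id = Input(shape=[1])
    label_map = layers.Embedding(input_dim=10, output_dim=50)(class_id)
    label_map = layers.Dense(units=7 * 7)(label_map)
    label_map = layers.Reshape(target_shape=(7, 7, 1))(label_map)
    feats = layers.Dense(units=7 * 7 * 256, use_bias=False)(z)
    feats = layers.Reshape((7, 7, 256))(feats)
    merged = layers.Concatenate(axis=3)([feats, label_map])
    demo = Model(inputs=[z, class_id], outputs=merged)
    demo.summary()  # merged feature map: (None, 7, 7, 257)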
| 36.531429
| 118
| 0.622869
| 858
| 6,393
| 4.490676
| 0.086247
| 0.103556
| 0.085128
| 0.066182
| 0.933818
| 0.909162
| 0.909162
| 0.909162
| 0.909162
| 0.907604
| 0
| 0.045308
| 0.223213
| 6,393
| 174
| 119
| 36.741379
| 0.730568
| 0
| 0
| 0.860656
| 0
| 0
| 0.011888
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065574
| false
| 0
| 0.040984
| 0
| 0.172131
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
19a5af7c094f1e230276b01122b494d2484995ec
| 10,430
|
py
|
Python
|
tests/libqif/core/test_Hyper.py
|
ramongonze/libqif
|
57be74a2342a303da5415a3d787855b8115e58f8
|
[
"MIT"
] | 2
|
2021-10-16T17:34:58.000Z
|
2021-11-16T16:15:13.000Z
|
tests/libqif/core/test_Hyper.py
|
ramongonze/libqif
|
57be74a2342a303da5415a3d787855b8115e58f8
|
[
"MIT"
] | null | null | null |
tests/libqif/core/test_Hyper.py
|
ramongonze/libqif
|
57be74a2342a303da5415a3d787855b8115e58f8
|
[
"MIT"
] | null | null | null |
from libqif.core.secrets import Secrets
from libqif.core.channel import Channel
from libqif.core.hyper import Hyper
import numpy as np
import unittest
class TestHyper(unittest.TestCase):
def setUp(self):
self.prior1 = np.array([1/4, 1/2, 1/4])
self.channel1 = np.array([
[1/2, 1/2, 0, 0],
[ 0, 1/4, 1/2, 1/4],
[1/2, 1/3, 1/6, 0]
])
self.prior2 = np.array([1/3,1/3,0,1/3])
self.channel2 = np.array([
[1/2, 1/6, 1/3, 0],
[ 0, 1/3, 2/3, 0],
[ 0, 1/2, 0, 1/2],
[1/4, 1/4, 1/2, 0]
])
self.prior3 = np.array([1/4,1/4,1/4,1/4])
self.channel3 = np.array([
[1/2, 1/2, 0, 0],
[ 0, 0, 1, 0],
[1/2, 1/4, 0, 1/4],
[1/8, 1/8, 1/4, 1/2]
])
self.channel_identity_3 = np.identity(3)
self.channel_identity_4 = np.identity(4)
def test_valid_hypers(self):
secrets = Secrets(['x1','x2','x3'], self.prior1)
channel = Channel(secrets, ['y1','y2','y3','y4'], self.channel1)
hyper = Hyper(channel)
np.testing.assert_array_equal(hyper.joint, np.array([
[1/8, 1/8, 0, 0],
[ 0, 1/8, 1/4, 1/8],
[1/8, 1/12, 1/24, 0],
]))
np.testing.assert_array_equal(hyper.outer, np.array([1/4,1/3,7/24,1/8]))
np.testing.assert_array_equal(hyper.inners, np.array([
[1/2, 3/8, 0, 0],
[ 0, 3/8, 6/7, 1],
[1/2, 1/4, 1/7, 0]
]))
# Channel that leaks everything
channel = Channel(secrets, ['y1','y2','y3'], self.channel_identity_3)
hyper = Hyper(channel)
np.testing.assert_array_equal(hyper.outer, secrets.prior)
np.testing.assert_array_equal(hyper.inners, np.identity(3))
# Channel that leaks nothing
channel = Channel(secrets, ['y1'], np.ones((3,1)))
hyper = Hyper(channel)
np.testing.assert_array_equal(hyper.outer, np.array([1]))
np.testing.assert_array_equal(hyper.inners, np.array([secrets.prior]).T)
secrets = Secrets(['x1','x2','x3','x4'], self.prior2)
channel = Channel(secrets, ['y1','y2','y3','y4'], self.channel2)
hyper = Hyper(channel)
np.testing.assert_array_equal(hyper.joint, np.array([
[ 1/6, 1/18, 1/9, 0],
[ 0, 1/9, 2/9, 0],
[ 0, 0, 0, 0],
[1/12, 1/12, 1/6, 0],
]))
np.testing.assert_array_equal(hyper.outer, np.array([1/4,3/4]))
np.testing.assert_array_equal(hyper.inners, np.array([
[2/3, 2/9],
[ 0, 4/9],
[ 0, 0],
[1/3, 1/3]
]))
# Exercise 4.1 of The Science of Quantitative Information Flow book
secrets = Secrets(['x1','x2','x3','x4'], self.prior3)
channel = Channel(secrets, ['y1','y2','y3','y4'], self.channel3)
hyper = Hyper(channel)
np.testing.assert_array_equal(hyper.outer, np.array([9/32,7/32,10/32,6/32]))
np.testing.assert_array_equal(hyper.inners, np.array([
[4/9, 4/7, 0, 0],
[ 0, 0, 4/5, 0],
[4/9, 2/7, 0, 1/3],
[1/9, 1/7, 1/5, 2/3]
]))
# Channel that leaks everything
channel = Channel(secrets, ['y1','y2','y3','y4'], self.channel_identity_4)
hyper = Hyper(channel)
np.testing.assert_array_equal(hyper.outer, secrets.prior)
np.testing.assert_array_equal(hyper.inners, np.identity(4))
# Channel that leaks nothing
channel = Channel(secrets, ['y1'], np.ones((4,1)))
hyper = Hyper(channel)
np.testing.assert_array_equal(hyper.outer, np.array([1]))
np.testing.assert_array_equal(hyper.inners, np.array([secrets.prior]).T)
# Exercise 4.2 of The Science of Quantitative Information Flow book
secrets = Secrets(['x1','x2','x3','x4','x5','x6','x7','x8'], [1/8]*8)
channel_c = Channel(secrets, ['y1','y2'], np.array([
[1,0],
[1,0],
[1,0],
[1,0],
[1,0],
[1,0],
[0,1],
[1,0],
]))
channel_d = Channel(secrets, ['y1','y2','y3','y4'], np.array([
[1,0,0,0],
[1,0,0,0],
[1,0,0,0],
[1,0,0,0],
[0,1,0,0],
[0,1,0,0],
[0,0,0,1],
[0,0,1,0],
]))
hyper_c = Hyper(channel_c)
hyper_d = Hyper(channel_d)
np.testing.assert_array_equal(hyper_c.outer, np.array([7/8,1/8]))
np.testing.assert_array_equal(hyper_c.inners, np.array([
[1/7, 0],
[1/7, 0],
[1/7, 0],
[1/7, 0],
[1/7, 0],
[1/7, 0],
[ 0, 1],
[1/7, 0],
]))
np.testing.assert_array_equal(hyper_d.outer, np.array([1/2,1/4,1/8,1/8]))
np.testing.assert_array_equal(hyper_d.inners, np.array([
[1/4, 0, 0, 0],
[1/4, 0, 0, 0],
[1/4, 0, 0, 0],
[1/4, 0, 0, 0],
[ 0, 1/2, 0, 0],
[ 0, 1/2, 0, 0],
[ 0, 0, 0, 1],
[ 0, 0, 1, 0],
]))
def test_valid_prior_updates(self):
secrets = Secrets(['x1','x2','x3'], [1,0,0])
channel = Channel(secrets, ['y1','y2','y3','y4'], self.channel1)
hyper = Hyper(channel)
hyper.update_prior(self.prior1)
np.testing.assert_array_equal(hyper.joint, np.array([
[1/8, 1/8, 0, 0],
[ 0, 1/8, 1/4, 1/8],
[1/8, 1/12, 1/24, 0],
]))
np.testing.assert_array_equal(hyper.outer, np.array([1/4,1/3,7/24,1/8]))
np.testing.assert_array_equal(hyper.inners, np.array([
[1/2, 3/8, 0, 0],
[ 0, 3/8, 6/7, 1],
[1/2, 1/4, 1/7, 0]
]))
# Channel that leaks everything
secrets = Secrets(['x1','x2','x3'], [1,0,0])
channel = Channel(secrets, ['y1','y2','y3'], self.channel_identity_3)
hyper = Hyper(channel)
hyper.update_prior(self.prior1)
np.testing.assert_array_equal(hyper.outer, secrets.prior)
np.testing.assert_array_equal(hyper.inners, np.identity(3))
# Channel that leaks nothing
secrets = Secrets(['x1','x2','x3'], [1,0,0])
channel = Channel(secrets, ['y1'], np.ones((3,1)))
hyper = Hyper(channel)
hyper.update_prior(self.prior1)
np.testing.assert_array_equal(hyper.outer, np.array([1]))
np.testing.assert_array_equal(hyper.inners, np.array([secrets.prior]).T)
secrets = Secrets(['x1','x2','x3','x4'], [1,0,0,0])
channel = Channel(secrets, ['y1','y2','y3','y4'], self.channel2)
hyper = Hyper(channel)
hyper.update_prior(self.prior2)
np.testing.assert_array_equal(hyper.joint, np.array([
[ 1/6, 1/18, 1/9, 0],
[ 0, 1/9, 2/9, 0],
[ 0, 0, 0, 0],
[1/12, 1/12, 1/6, 0],
]))
np.testing.assert_array_equal(hyper.outer, np.array([1/4,3/4]))
np.testing.assert_array_equal(hyper.inners, np.array([
[2/3, 2/9],
[ 0, 4/9],
[ 0, 0],
[1/3, 1/3]
]))
# Exercise 4.1 of The Science of Quantitative Information Flow book
secrets = Secrets(['x1','x2','x3','x4'], [1,0,0,0])
channel = Channel(secrets, ['y1','y2','y3','y4'], self.channel3)
hyper = Hyper(channel)
hyper.update_prior(self.prior3)
np.testing.assert_array_equal(hyper.outer, np.array([9/32,7/32,10/32,6/32]))
np.testing.assert_array_equal(hyper.inners, np.array([
[4/9, 4/7, 0, 0],
[ 0, 0, 4/5, 0],
[4/9, 2/7, 0, 1/3],
[1/9, 1/7, 1/5, 2/3]
]))
# Channel that leaks everything
secrets = Secrets(['x1','x2','x3','x4'], [1,0,0,0])
channel = Channel(secrets, ['y1','y2','y3','y4'], self.channel_identity_4)
hyper = Hyper(channel)
hyper.update_prior(self.prior3)
np.testing.assert_array_equal(hyper.outer, secrets.prior)
np.testing.assert_array_equal(hyper.inners, np.identity(4))
# Channel that leaks nothing
secrets = Secrets(['x1','x2','x3','x4'], [1,0,0,0])
channel = Channel(secrets, ['y1'], np.ones((4,1)))
hyper = Hyper(channel)
hyper.update_prior(self.prior3)
np.testing.assert_array_equal(hyper.outer, np.array([1]))
np.testing.assert_array_equal(hyper.inners, np.array([secrets.prior]).T)
# Exercise 4.2 of The Science of Quantitative Information Flow book
secrets = Secrets(['x1','x2','x3','x4','x5','x6','x7','x8'], [1,0,0,0,0,0,0,0])
channel_c = Channel(secrets, ['y1','y2'], np.array([
[1,0],
[1,0],
[1,0],
[1,0],
[1,0],
[1,0],
[0,1],
[1,0],
]))
channel_d = Channel(secrets, ['y1','y2','y3','y4'], np.array([
[1,0,0,0],
[1,0,0,0],
[1,0,0,0],
[1,0,0,0],
[0,1,0,0],
[0,1,0,0],
[0,0,0,1],
[0,0,1,0],
]))
hyper_c = Hyper(channel_c)
hyper_c.update_prior([1/8]*8)
hyper_d = Hyper(channel_d)
hyper_d.update_prior([1/8]*8)
np.testing.assert_array_equal(hyper_c.outer, np.array([7/8,1/8]))
np.testing.assert_array_equal(hyper_c.inners, np.array([
[1/7, 0],
[1/7, 0],
[1/7, 0],
[1/7, 0],
[1/7, 0],
[1/7, 0],
[ 0, 1],
[1/7, 0],
]))
np.testing.assert_array_equal(hyper_d.outer, np.array([1/2,1/4,1/8,1/8]))
np.testing.assert_array_equal(hyper_d.inners, np.array([
[1/4, 0, 0, 0],
[1/4, 0, 0, 0],
[1/4, 0, 0, 0],
[1/4, 0, 0, 0],
[ 0, 1/2, 0, 0],
[ 0, 1/2, 0, 0],
[ 0, 0, 0, 1],
[ 0, 0, 1, 0],
]))
if __name__ == '__main__':
unittest.main()
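# Hedged sketch (not part of the test suite): the hyper-distribution arithmetic the
# assertions above encode, computed directly -- joint = prior-weighted channel rows,
# outer = column sums of the joint, inners = joint columns renormalised by the outer.
# Values reuse prior1/channel1 from setUp; every column of that channel has positive mass.
_prior = np.array([1/4, 1/2, 1/4])
_channel = np.array([[1/2, 1/2, 0, 0],
                     [0, 1/4, 1/2, 1/4],
                     [1/2, 1/3, 1/6, 0]])
_joint = _prior[:, None] * _channel   # J[x, y] = pi(x) * C[x, y]
_outer = _joint.sum(axis=0)           # marginal on observations y
_inners = _joint / _outer             # posterior pi(x | y), one column per y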
| 35.841924
| 87
| 0.474113
| 1,522
| 10,430
| 3.159658
| 0.055191
| 0.049075
| 0.038677
| 0.166355
| 0.909129
| 0.887295
| 0.875234
| 0.867956
| 0.867748
| 0.861926
| 0
| 0.115796
| 0.335954
| 10,430
| 291
| 88
| 35.841924
| 0.578545
| 0.047076
| 0
| 0.855422
| 0
| 0
| 0.022162
| 0
| 0
| 0
| 0
| 0
| 0.160643
| 1
| 0.012048
| false
| 0
| 0.024096
| 0
| 0.040161
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2727c0d6df12e6bdd7d028f7aa646508e90e92dd
| 74
|
py
|
Python
|
Themes/__init__.py
|
serumstudio/SerumWriter
|
5e212b49e8d3da3890cd685a985438d298db5e26
|
[
"MIT"
] | 2
|
2022-03-24T05:29:02.000Z
|
2022-03-24T11:01:44.000Z
|
Themes/__init__.py
|
serumstudio/SerumWriter
|
5e212b49e8d3da3890cd685a985438d298db5e26
|
[
"MIT"
] | null | null | null |
Themes/__init__.py
|
serumstudio/SerumWriter
|
5e212b49e8d3da3890cd685a985438d298db5e26
|
[
"MIT"
] | null | null | null |
from SerumWriter.Themes import Dark
from SerumWriter.Themes import Light
| 18.5
| 36
| 0.851351
| 10
| 74
| 6.3
| 0.6
| 0.47619
| 0.666667
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121622
| 74
| 3
| 37
| 24.666667
| 0.969231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
27ccf4b8dac292cfa6a4767e7d8503ce4f4ea1de
| 130
|
py
|
Python
|
basic_skills/views.py
|
bluebamus/django_miscellaneous_book
|
22e0851b3a07aeef94bb723b334f036ed5c17f72
|
[
"MIT"
] | null | null | null |
basic_skills/views.py
|
bluebamus/django_miscellaneous_book
|
22e0851b3a07aeef94bb723b334f036ed5c17f72
|
[
"MIT"
] | null | null | null |
basic_skills/views.py
|
bluebamus/django_miscellaneous_book
|
22e0851b3a07aeef94bb723b334f036ed5c17f72
|
[
"MIT"
] | null | null | null |
from .views_ex.views_cbv_mixin import *
from .views_ex.views_messages import *
from .views_ex.views_two_scoops_of_django import *
| 32.5
| 50
| 0.838462
| 22
| 130
| 4.5
| 0.5
| 0.272727
| 0.333333
| 0.484848
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092308
| 130
| 3
| 51
| 43.333333
| 0.838983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
fd669be7b63a0aea335698a75555046607442b5d
| 1,187
|
py
|
Python
|
python--exercicios/ex052.py
|
Eliezer2000/python
|
12abb54c6536acb2f36b8f34bf51ec765857eb75
|
[
"MIT"
] | null | null | null |
python--exercicios/ex052.py
|
Eliezer2000/python
|
12abb54c6536acb2f36b8f34bf51ec765857eb75
|
[
"MIT"
] | null | null | null |
python--exercicios/ex052.py
|
Eliezer2000/python
|
12abb54c6536acb2f36b8f34bf51ec765857eb75
|
[
"MIT"
] | null | null | null |
num = int(input('Type a number: '))
tot = 0
for c in range(1, num + 1):
    if num % c == 0:
        print('\033[33m', end='')
        tot += 1
    else:
        print('\033[31m', end='')
    print('{}'.format(c), end=' ')
print('The number {} was divided {} times'.format(num, tot))
if tot == 2:
    print('And that is why it is PRIME')
else:
    print('That is why it is not PRIME')
num = int(input('Type a number: '))
tot = 0
for c in range(1, num + 1):
    if num % c == 0:
        print('\033[33m', end=' ')
        tot += 1
    else:
        print('\033[31m', end=' ')
    print('{}'.format(c), end=' ')
print('The number {} was divisible {} times'.format(num, tot))
if tot == 2:
    print('That is why it is PRIME')
else:
    print('That is why it is not prime')
num = int(input('Type a number: '))
tot = 0
for c in range(1, num + 1):
    if num % c == 0:
        print('\033[34m', end=' ')
        tot += 1
    else:
        print('\033[31m', end=' ')
    print('{}'.format(c), end=' ')
print('The number {} was divided {} times'.format(num, tot))
if tot == 2:
    print('That is why it is PRIME')
else:
    print('It is not prime')
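# Hedged consolidation (illustration, not one of the three exercise variants): the
# shared idea above is a divisor count, which factors out into a single function.
def is_prime(n):
    """A number is prime when it has exactly two divisors: 1 and itself."""
    return sum(1 for c in range(1, n + 1) if n % c == 0) == 2

if __name__ == '__main__':
    n = int(input('Type a number: '))
    print('{} is {}'.format(n, 'PRIME' if is_prime(n) else 'not prime'))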
| 21.581818
| 63
| 0.510531
| 184
| 1,187
| 3.293478
| 0.195652
| 0.079208
| 0.082508
| 0.09901
| 0.930693
| 0.930693
| 0.905941
| 0.905941
| 0.859736
| 0.859736
| 0
| 0.056272
| 0.281382
| 1,187
| 54
| 64
| 21.981481
| 0.654162
| 0
| 0
| 0.813953
| 0
| 0
| 0.296108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.44186
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
e34952d31eb22086b5b6d916f98bf6047324cad8
| 152
|
py
|
Python
|
libs/yowsup/yowsup/yowsup/layers/protocol_messages/protocolentities/__init__.py
|
akshitpradhan/TomHack
|
837226e7b38de1140c19bc2d478eeb9e379ed1fd
|
[
"MIT"
] | 22
|
2017-07-14T20:01:17.000Z
|
2022-03-08T14:22:39.000Z
|
libs/yowsup/yowsup/yowsup/layers/protocol_messages/protocolentities/__init__.py
|
akshitpradhan/TomHack
|
837226e7b38de1140c19bc2d478eeb9e379ed1fd
|
[
"MIT"
] | 6
|
2017-07-14T21:03:50.000Z
|
2021-06-10T19:08:32.000Z
|
libs/yowsup/yowsup/yowsup/layers/protocol_messages/protocolentities/__init__.py
|
akshitpradhan/TomHack
|
837226e7b38de1140c19bc2d478eeb9e379ed1fd
|
[
"MIT"
] | 13
|
2017-07-14T20:13:14.000Z
|
2020-11-12T08:06:05.000Z
|
from .message_text import TextMessageProtocolEntity
from .message import MessageProtocolEntity
from .message_text_broadcast import BroadcastTextMessage
| 38
| 56
| 0.901316
| 15
| 152
| 8.933333
| 0.533333
| 0.246269
| 0.223881
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 152
| 3
| 57
| 50.666667
| 0.957143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8b729053d953532b16108805ecf50163a9bc77d4
| 103
|
py
|
Python
|
01_analysis.py
|
karlbenedict/carc-testing
|
654305317525c8eab35adadf56ce763375b83151
|
[
"Apache-2.0"
] | null | null | null |
01_analysis.py
|
karlbenedict/carc-testing
|
654305317525c8eab35adadf56ce763375b83151
|
[
"Apache-2.0"
] | null | null | null |
01_analysis.py
|
karlbenedict/carc-testing
|
654305317525c8eab35adadf56ce763375b83151
|
[
"Apache-2.0"
] | null | null | null |
import datetime
# print 'hello world' and the date
print('hello world' + str(datetime.datetime.now()))
| 20.6
| 50
| 0.737864
| 15
| 103
| 5.066667
| 0.666667
| 0.263158
| 0.394737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145631
| 103
| 4
| 51
| 25.75
| 0.863636
| 0.31068
| 0
| 0
| 0
| 0
| 0.15942
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
8b7dfae1ed13816bc987a173c323603c0f9c4c42
| 2,144
|
py
|
Python
|
script/lib/get_small_cmd.py
|
cyberphantom/Selfie-Drone-Stick
|
7ac9fa49445c63a4fdbcb20db47ae877624ea03b
|
[
"MIT"
] | 2
|
2021-07-29T00:55:43.000Z
|
2022-03-21T17:36:51.000Z
|
script/lib/get_small_cmd.py
|
cyberphantom/Selfie-Drone-Stick
|
7ac9fa49445c63a4fdbcb20db47ae877624ea03b
|
[
"MIT"
] | null | null | null |
script/lib/get_small_cmd.py
|
cyberphantom/Selfie-Drone-Stick
|
7ac9fa49445c63a4fdbcb20db47ae877624ea03b
|
[
"MIT"
] | 1
|
2022-03-21T17:36:52.000Z
|
2022-03-21T17:36:52.000Z
|
#!/usr/bin/env python
from geometry_msgs.msg import Twist
def cmdVel(ac, vel):
vel_cmd = Twist()
action = ac
if action == 0: # Hover
vel_cmd.linear.x = 0.0
vel_cmd.angular.x = 0.0
vel_cmd.linear.y = 0.0
vel_cmd.angular.y = 0.0
vel_cmd.linear.z = 0.0
vel_cmd.angular.z = 0.0
if action == 1: # Forward
vel_cmd.linear.x = vel
vel_cmd.angular.x = 0.0
vel_cmd.linear.y = 0.0
vel_cmd.angular.y = 0.0
vel_cmd.linear.z = 0.0
vel_cmd.angular.z = 0.0
elif action == 2: # Backward
vel_cmd.linear.x = -vel
vel_cmd.angular.x = 0.0
vel_cmd.linear.y = 0.0
vel_cmd.angular.y = 0.0
vel_cmd.linear.z = 0.0
vel_cmd.angular.z = 0.0
elif action == 3: # Tilt Left
vel_cmd.linear.x = 0.0
vel_cmd.angular.x = 0.0
vel_cmd.linear.y = vel
vel_cmd.angular.y = 0.0
vel_cmd.linear.z = 0.0
vel_cmd.angular.z = 0.0
elif action == 4: # Tilt Right
vel_cmd.linear.x = 0.0
vel_cmd.angular.x = 0.0
vel_cmd.linear.y = -vel
vel_cmd.angular.y = 0.0
vel_cmd.linear.z = 0.0
vel_cmd.angular.z = 0.0
elif action == 5: # Up
vel_cmd.linear.x = 0.0
vel_cmd.angular.x = 0.0
vel_cmd.linear.y = 0.0
vel_cmd.angular.y = 0.0
vel_cmd.linear.z = vel
vel_cmd.angular.z = 0.0
elif action == 6: # Down
vel_cmd.linear.x = 0.0
vel_cmd.angular.x = 0.0
vel_cmd.linear.y = 0.0
vel_cmd.angular.y = 0.0
vel_cmd.linear.z = -vel
vel_cmd.angular.z = 0.0
elif action == 7: # Ang Left
vel_cmd.linear.x = 0.0
vel_cmd.angular.x = 0.0
vel_cmd.linear.y = 0.0
vel_cmd.angular.y = 0.0
vel_cmd.linear.z = 0.0
vel_cmd.angular.z = vel
elif action == 8: # Ang Right
vel_cmd.linear.x = 0.0
vel_cmd.angular.x = 0.0
vel_cmd.linear.y = 0.0
vel_cmd.angular.y = 0.0
vel_cmd.linear.z = 0.0
vel_cmd.angular.z = -vel
return vel_cmd
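# Hedged alternative (sketch, not the original interface): the if/elif ladder above is
# a pure lookup, so the same Twist values can come from a dict keyed by action id.
# Tuple order is (linear.x, linear.y, linear.z, angular.z); action 0 (hover) and any
# unknown id fall back to all zeros, and angular.x / angular.y keep Twist's zero defaults.
def cmdVel_table(ac, vel):
    axes = {
        1: (vel, 0.0, 0.0, 0.0),    # Forward
        2: (-vel, 0.0, 0.0, 0.0),   # Backward
        3: (0.0, vel, 0.0, 0.0),    # Tilt Left
        4: (0.0, -vel, 0.0, 0.0),   # Tilt Right
        5: (0.0, 0.0, vel, 0.0),    # Up
        6: (0.0, 0.0, -vel, 0.0),   # Down
        7: (0.0, 0.0, 0.0, vel),    # Ang Left
        8: (0.0, 0.0, 0.0, -vel),   # Ang Right
    }
    vel_cmd = Twist()
    (vel_cmd.linear.x, vel_cmd.linear.y,
     vel_cmd.linear.z, vel_cmd.angular.z) = axes.get(ac, (0.0, 0.0, 0.0, 0.0))
    return vel_cmd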
| 26.469136
| 35
| 0.534049
| 378
| 2,144
| 2.878307
| 0.111111
| 0.308824
| 0.179228
| 0.286765
| 0.848346
| 0.848346
| 0.848346
| 0.848346
| 0.848346
| 0.848346
| 0
| 0.071784
| 0.34375
| 2,144
| 80
| 36
| 26.8
| 0.701493
| 0.042444
| 0
| 0.676471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014706
| false
| 0
| 0.014706
| 0
| 0.044118
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8bb5c11b19be79bb1fa99db94a009e0786f898a8
| 40,187
|
py
|
Python
|
sdk/python/pulumi_oci/core/security_list.py
|
EladGabay/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2021-08-17T11:14:46.000Z
|
2021-12-31T02:07:03.000Z
|
sdk/python/pulumi_oci/core/security_list.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-09-06T11:21:29.000Z
|
2021-09-06T11:21:29.000Z
|
sdk/python/pulumi_oci/core/security_list.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-08-24T23:31:30.000Z
|
2022-01-02T19:26:54.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['SecurityListArgs', 'SecurityList']
@pulumi.input_type
class SecurityListArgs:
def __init__(__self__, *,
compartment_id: pulumi.Input[str],
vcn_id: pulumi.Input[str],
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
egress_security_rules: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityListEgressSecurityRuleArgs']]]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
ingress_security_rules: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityListIngressSecurityRuleArgs']]]] = None):
"""
The set of arguments for constructing a SecurityList resource.
:param pulumi.Input[str] compartment_id: (Updatable) The OCID of the compartment to contain the security list.
:param pulumi.Input[str] vcn_id: The OCID of the VCN the security list belongs to.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
:param pulumi.Input[str] display_name: (Updatable) A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
:param pulumi.Input[Sequence[pulumi.Input['SecurityListEgressSecurityRuleArgs']]] egress_security_rules: (Updatable) Rules for allowing egress IP packets.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
:param pulumi.Input[Sequence[pulumi.Input['SecurityListIngressSecurityRuleArgs']]] ingress_security_rules: (Updatable) Rules for allowing ingress IP packets.
"""
pulumi.set(__self__, "compartment_id", compartment_id)
pulumi.set(__self__, "vcn_id", vcn_id)
if defined_tags is not None:
pulumi.set(__self__, "defined_tags", defined_tags)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if egress_security_rules is not None:
pulumi.set(__self__, "egress_security_rules", egress_security_rules)
if freeform_tags is not None:
pulumi.set(__self__, "freeform_tags", freeform_tags)
if ingress_security_rules is not None:
pulumi.set(__self__, "ingress_security_rules", ingress_security_rules)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> pulumi.Input[str]:
"""
(Updatable) The OCID of the compartment to contain the security list.
"""
return pulumi.get(self, "compartment_id")
@compartment_id.setter
def compartment_id(self, value: pulumi.Input[str]):
pulumi.set(self, "compartment_id", value)
@property
@pulumi.getter(name="vcnId")
def vcn_id(self) -> pulumi.Input[str]:
"""
The OCID of the VCN the security list belongs to.
"""
return pulumi.get(self, "vcn_id")
@vcn_id.setter
def vcn_id(self, value: pulumi.Input[str]):
pulumi.set(self, "vcn_id", value)
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
"""
return pulumi.get(self, "defined_tags")
@defined_tags.setter
def defined_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "defined_tags", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="egressSecurityRules")
def egress_security_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SecurityListEgressSecurityRuleArgs']]]]:
"""
(Updatable) Rules for allowing egress IP packets.
"""
return pulumi.get(self, "egress_security_rules")
@egress_security_rules.setter
def egress_security_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityListEgressSecurityRuleArgs']]]]):
pulumi.set(self, "egress_security_rules", value)
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
"""
return pulumi.get(self, "freeform_tags")
@freeform_tags.setter
def freeform_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "freeform_tags", value)
@property
@pulumi.getter(name="ingressSecurityRules")
def ingress_security_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SecurityListIngressSecurityRuleArgs']]]]:
"""
(Updatable) Rules for allowing ingress IP packets.
"""
return pulumi.get(self, "ingress_security_rules")
@ingress_security_rules.setter
def ingress_security_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityListIngressSecurityRuleArgs']]]]):
pulumi.set(self, "ingress_security_rules", value)
@pulumi.input_type
class _SecurityListState:
def __init__(__self__, *,
compartment_id: Optional[pulumi.Input[str]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
egress_security_rules: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityListEgressSecurityRuleArgs']]]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
ingress_security_rules: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityListIngressSecurityRuleArgs']]]] = None,
state: Optional[pulumi.Input[str]] = None,
time_created: Optional[pulumi.Input[str]] = None,
vcn_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering SecurityList resources.
:param pulumi.Input[str] compartment_id: (Updatable) The OCID of the compartment to contain the security list.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
:param pulumi.Input[str] display_name: (Updatable) A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
:param pulumi.Input[Sequence[pulumi.Input['SecurityListEgressSecurityRuleArgs']]] egress_security_rules: (Updatable) Rules for allowing egress IP packets.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
:param pulumi.Input[Sequence[pulumi.Input['SecurityListIngressSecurityRuleArgs']]] ingress_security_rules: (Updatable) Rules for allowing ingress IP packets.
:param pulumi.Input[str] state: The security list's current state.
:param pulumi.Input[str] time_created: The date and time the security list was created, in the format defined by [RFC3339](https://tools.ietf.org/html/rfc3339). Example: `2016-08-25T21:10:29.600Z`
:param pulumi.Input[str] vcn_id: The OCID of the VCN the security list belongs to.
"""
if compartment_id is not None:
pulumi.set(__self__, "compartment_id", compartment_id)
if defined_tags is not None:
pulumi.set(__self__, "defined_tags", defined_tags)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if egress_security_rules is not None:
pulumi.set(__self__, "egress_security_rules", egress_security_rules)
if freeform_tags is not None:
pulumi.set(__self__, "freeform_tags", freeform_tags)
if ingress_security_rules is not None:
pulumi.set(__self__, "ingress_security_rules", ingress_security_rules)
if state is not None:
pulumi.set(__self__, "state", state)
if time_created is not None:
pulumi.set(__self__, "time_created", time_created)
if vcn_id is not None:
pulumi.set(__self__, "vcn_id", vcn_id)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) The OCID of the compartment to contain the security list.
"""
return pulumi.get(self, "compartment_id")
@compartment_id.setter
def compartment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "compartment_id", value)
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
"""
return pulumi.get(self, "defined_tags")
@defined_tags.setter
def defined_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "defined_tags", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
(Updatable) A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="egressSecurityRules")
def egress_security_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SecurityListEgressSecurityRuleArgs']]]]:
"""
(Updatable) Rules for allowing egress IP packets.
"""
return pulumi.get(self, "egress_security_rules")
@egress_security_rules.setter
def egress_security_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityListEgressSecurityRuleArgs']]]]):
pulumi.set(self, "egress_security_rules", value)
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
(Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
"""
return pulumi.get(self, "freeform_tags")
@freeform_tags.setter
def freeform_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "freeform_tags", value)
@property
@pulumi.getter(name="ingressSecurityRules")
def ingress_security_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SecurityListIngressSecurityRuleArgs']]]]:
"""
(Updatable) Rules for allowing ingress IP packets.
"""
return pulumi.get(self, "ingress_security_rules")
@ingress_security_rules.setter
def ingress_security_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SecurityListIngressSecurityRuleArgs']]]]):
pulumi.set(self, "ingress_security_rules", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input[str]]:
"""
The security list's current state.
"""
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "state", value)
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> Optional[pulumi.Input[str]]:
"""
The date and time the security list was created, in the format defined by [RFC3339](https://tools.ietf.org/html/rfc3339). Example: `2016-08-25T21:10:29.600Z`
"""
return pulumi.get(self, "time_created")
@time_created.setter
def time_created(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "time_created", value)
@property
@pulumi.getter(name="vcnId")
def vcn_id(self) -> Optional[pulumi.Input[str]]:
"""
The OCID of the VCN the security list belongs to.
"""
return pulumi.get(self, "vcn_id")
@vcn_id.setter
def vcn_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vcn_id", value)
class SecurityList(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
egress_security_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SecurityListEgressSecurityRuleArgs']]]]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
ingress_security_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SecurityListIngressSecurityRuleArgs']]]]] = None,
vcn_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
This resource provides the Security List resource in Oracle Cloud Infrastructure Core service.
Creates a new security list for the specified VCN. For more information
about security lists, see [Security Lists](https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/securitylists.htm).
For information on the number of rules you can have in a security list, see
[Service Limits](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/servicelimits.htm).
For the purposes of access control, you must provide the OCID of the compartment where you want the security
list to reside. Notice that the security list doesn't have to be in the same compartment as the VCN, subnets,
or other Networking Service components. If you're not sure which compartment to use, put the security
list in the same compartment as the VCN. For more information about compartments and access control, see
[Overview of the IAM Service](https://docs.cloud.oracle.com/iaas/Content/Identity/Concepts/overview.htm). For information about OCIDs, see
[Resource Identifiers](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
You may optionally specify a *display name* for the security list, otherwise a default is provided.
It does not have to be unique, and you can change it. Avoid entering confidential information.
For more information on configuring a VCN's default security list, see [Managing Default VCN Resources](https://www.terraform.io/docs/providers/oci/guides/managing_default_resources.html)
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_security_list = oci.core.SecurityList("testSecurityList",
compartment_id=var["compartment_id"],
vcn_id=oci_core_vcn["test_vcn"]["id"],
defined_tags={
"Operations.CostCenter": "42",
},
display_name=var["security_list_display_name"],
egress_security_rules=[oci.core.SecurityListEgressSecurityRuleArgs(
destination=var["security_list_egress_security_rules_destination"],
protocol=var["security_list_egress_security_rules_protocol"],
description=var["security_list_egress_security_rules_description"],
destination_type=var["security_list_egress_security_rules_destination_type"],
icmp_options=oci.core.SecurityListEgressSecurityRuleIcmpOptionsArgs(
type=var["security_list_egress_security_rules_icmp_options_type"],
code=var["security_list_egress_security_rules_icmp_options_code"],
),
stateless=var["security_list_egress_security_rules_stateless"],
tcp_options=oci.core.SecurityListEgressSecurityRuleTcpOptionsArgs(
max=var["security_list_egress_security_rules_tcp_options_destination_port_range_max"],
min=var["security_list_egress_security_rules_tcp_options_destination_port_range_min"],
source_port_range=oci.core.SecurityListEgressSecurityRuleTcpOptionsSourcePortRangeArgs(
max=var["security_list_egress_security_rules_tcp_options_source_port_range_max"],
min=var["security_list_egress_security_rules_tcp_options_source_port_range_min"],
),
),
udp_options=oci.core.SecurityListEgressSecurityRuleUdpOptionsArgs(
max=var["security_list_egress_security_rules_udp_options_destination_port_range_max"],
min=var["security_list_egress_security_rules_udp_options_destination_port_range_min"],
source_port_range=oci.core.SecurityListEgressSecurityRuleUdpOptionsSourcePortRangeArgs(
max=var["security_list_egress_security_rules_udp_options_source_port_range_max"],
min=var["security_list_egress_security_rules_udp_options_source_port_range_min"],
),
),
)],
freeform_tags={
"Department": "Finance",
},
ingress_security_rules=[oci.core.SecurityListIngressSecurityRuleArgs(
protocol=var["security_list_ingress_security_rules_protocol"],
source=var["security_list_ingress_security_rules_source"],
description=var["security_list_ingress_security_rules_description"],
icmp_options=oci.core.SecurityListIngressSecurityRuleIcmpOptionsArgs(
type=var["security_list_ingress_security_rules_icmp_options_type"],
code=var["security_list_ingress_security_rules_icmp_options_code"],
),
source_type=var["security_list_ingress_security_rules_source_type"],
stateless=var["security_list_ingress_security_rules_stateless"],
tcp_options=oci.core.SecurityListIngressSecurityRuleTcpOptionsArgs(
max=var["security_list_ingress_security_rules_tcp_options_destination_port_range_max"],
min=var["security_list_ingress_security_rules_tcp_options_destination_port_range_min"],
source_port_range=oci.core.SecurityListIngressSecurityRuleTcpOptionsSourcePortRangeArgs(
max=var["security_list_ingress_security_rules_tcp_options_source_port_range_max"],
min=var["security_list_ingress_security_rules_tcp_options_source_port_range_min"],
),
),
udp_options=oci.core.SecurityListIngressSecurityRuleUdpOptionsArgs(
max=var["security_list_ingress_security_rules_udp_options_destination_port_range_max"],
min=var["security_list_ingress_security_rules_udp_options_destination_port_range_min"],
source_port_range=oci.core.SecurityListIngressSecurityRuleUdpOptionsSourcePortRangeArgs(
max=var["security_list_ingress_security_rules_udp_options_source_port_range_max"],
min=var["security_list_ingress_security_rules_udp_options_source_port_range_min"],
),
),
)])
```
## Import
SecurityLists can be imported using the `id`, e.g.
```sh
$ pulumi import oci:core/securityList:SecurityList test_security_list "id"
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] compartment_id: (Updatable) The OCID of the compartment to contain the security list.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
:param pulumi.Input[str] display_name: (Updatable) A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SecurityListEgressSecurityRuleArgs']]]] egress_security_rules: (Updatable) Rules for allowing egress IP packets.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SecurityListIngressSecurityRuleArgs']]]] ingress_security_rules: (Updatable) Rules for allowing ingress IP packets.
:param pulumi.Input[str] vcn_id: The OCID of the VCN the security list belongs to.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SecurityListArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
This resource provides the Security List resource in Oracle Cloud Infrastructure Core service.
Creates a new security list for the specified VCN. For more information
about security lists, see [Security Lists](https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/securitylists.htm).
For information on the number of rules you can have in a security list, see
[Service Limits](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/servicelimits.htm).
For the purposes of access control, you must provide the OCID of the compartment where you want the security
list to reside. Notice that the security list doesn't have to be in the same compartment as the VCN, subnets,
or other Networking Service components. If you're not sure which compartment to use, put the security
list in the same compartment as the VCN. For more information about compartments and access control, see
[Overview of the IAM Service](https://docs.cloud.oracle.com/iaas/Content/Identity/Concepts/overview.htm). For information about OCIDs, see
[Resource Identifiers](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
You may optionally specify a *display name* for the security list; otherwise, a default is provided.
It does not have to be unique, and you can change it. Avoid entering confidential information.
For more information on configuring a VCN's default security list, see [Managing Default VCN Resources](https://www.terraform.io/docs/providers/oci/guides/managing_default_resources.html).
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_security_list = oci.core.SecurityList("testSecurityList",
compartment_id=var["compartment_id"],
vcn_id=oci_core_vcn["test_vcn"]["id"],
defined_tags={
"Operations.CostCenter": "42",
},
display_name=var["security_list_display_name"],
egress_security_rules=[oci.core.SecurityListEgressSecurityRuleArgs(
destination=var["security_list_egress_security_rules_destination"],
protocol=var["security_list_egress_security_rules_protocol"],
description=var["security_list_egress_security_rules_description"],
destination_type=var["security_list_egress_security_rules_destination_type"],
icmp_options=oci.core.SecurityListEgressSecurityRuleIcmpOptionsArgs(
type=var["security_list_egress_security_rules_icmp_options_type"],
code=var["security_list_egress_security_rules_icmp_options_code"],
),
stateless=var["security_list_egress_security_rules_stateless"],
tcp_options=oci.core.SecurityListEgressSecurityRuleTcpOptionsArgs(
max=var["security_list_egress_security_rules_tcp_options_destination_port_range_max"],
min=var["security_list_egress_security_rules_tcp_options_destination_port_range_min"],
source_port_range=oci.core.SecurityListEgressSecurityRuleTcpOptionsSourcePortRangeArgs(
max=var["security_list_egress_security_rules_tcp_options_source_port_range_max"],
min=var["security_list_egress_security_rules_tcp_options_source_port_range_min"],
),
),
udp_options=oci.core.SecurityListEgressSecurityRuleUdpOptionsArgs(
max=var["security_list_egress_security_rules_udp_options_destination_port_range_max"],
min=var["security_list_egress_security_rules_udp_options_destination_port_range_min"],
source_port_range=oci.core.SecurityListEgressSecurityRuleUdpOptionsSourcePortRangeArgs(
max=var["security_list_egress_security_rules_udp_options_source_port_range_max"],
min=var["security_list_egress_security_rules_udp_options_source_port_range_min"],
),
),
)],
freeform_tags={
"Department": "Finance",
},
ingress_security_rules=[oci.core.SecurityListIngressSecurityRuleArgs(
protocol=var["security_list_ingress_security_rules_protocol"],
source=var["security_list_ingress_security_rules_source"],
description=var["security_list_ingress_security_rules_description"],
icmp_options=oci.core.SecurityListIngressSecurityRuleIcmpOptionsArgs(
type=var["security_list_ingress_security_rules_icmp_options_type"],
code=var["security_list_ingress_security_rules_icmp_options_code"],
),
source_type=var["security_list_ingress_security_rules_source_type"],
stateless=var["security_list_ingress_security_rules_stateless"],
tcp_options=oci.core.SecurityListIngressSecurityRuleTcpOptionsArgs(
max=var["security_list_ingress_security_rules_tcp_options_destination_port_range_max"],
min=var["security_list_ingress_security_rules_tcp_options_destination_port_range_min"],
source_port_range=oci.core.SecurityListIngressSecurityRuleTcpOptionsSourcePortRangeArgs(
max=var["security_list_ingress_security_rules_tcp_options_source_port_range_max"],
min=var["security_list_ingress_security_rules_tcp_options_source_port_range_min"],
),
),
udp_options=oci.core.SecurityListIngressSecurityRuleUdpOptionsArgs(
max=var["security_list_ingress_security_rules_udp_options_destination_port_range_max"],
min=var["security_list_ingress_security_rules_udp_options_destination_port_range_min"],
source_port_range=oci.core.SecurityListIngressSecurityRuleUdpOptionsSourcePortRangeArgs(
max=var["security_list_ingress_security_rules_udp_options_source_port_range_max"],
min=var["security_list_ingress_security_rules_udp_options_source_port_range_min"],
),
),
)])
```
## Import
SecurityLists can be imported using the `id`, e.g.
```sh
$ pulumi import oci:core/securityList:SecurityList test_security_list "id"
```
:param str resource_name: The name of the resource.
:param SecurityListArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SecurityListArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
egress_security_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SecurityListEgressSecurityRuleArgs']]]]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
ingress_security_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SecurityListIngressSecurityRuleArgs']]]]] = None,
vcn_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SecurityListArgs.__new__(SecurityListArgs)
if compartment_id is None and not opts.urn:
raise TypeError("Missing required property 'compartment_id'")
__props__.__dict__["compartment_id"] = compartment_id
__props__.__dict__["defined_tags"] = defined_tags
__props__.__dict__["display_name"] = display_name
__props__.__dict__["egress_security_rules"] = egress_security_rules
__props__.__dict__["freeform_tags"] = freeform_tags
__props__.__dict__["ingress_security_rules"] = ingress_security_rules
if vcn_id is None and not opts.urn:
raise TypeError("Missing required property 'vcn_id'")
__props__.__dict__["vcn_id"] = vcn_id
__props__.__dict__["state"] = None
__props__.__dict__["time_created"] = None
super(SecurityList, __self__).__init__(
'oci:core/securityList:SecurityList',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
egress_security_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SecurityListEgressSecurityRuleArgs']]]]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
ingress_security_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SecurityListIngressSecurityRuleArgs']]]]] = None,
state: Optional[pulumi.Input[str]] = None,
time_created: Optional[pulumi.Input[str]] = None,
vcn_id: Optional[pulumi.Input[str]] = None) -> 'SecurityList':
"""
Get an existing SecurityList resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] compartment_id: (Updatable) The OCID of the compartment to contain the security list.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
:param pulumi.Input[str] display_name: (Updatable) A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SecurityListEgressSecurityRuleArgs']]]] egress_security_rules: (Updatable) Rules for allowing egress IP packets.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SecurityListIngressSecurityRuleArgs']]]] ingress_security_rules: (Updatable) Rules for allowing ingress IP packets.
:param pulumi.Input[str] state: The security list's current state.
:param pulumi.Input[str] time_created: The date and time the security list was created, in the format defined by [RFC3339](https://tools.ietf.org/html/rfc3339). Example: `2016-08-25T21:10:29.600Z`
:param pulumi.Input[str] vcn_id: The OCID of the VCN the security list belongs to.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SecurityListState.__new__(_SecurityListState)
__props__.__dict__["compartment_id"] = compartment_id
__props__.__dict__["defined_tags"] = defined_tags
__props__.__dict__["display_name"] = display_name
__props__.__dict__["egress_security_rules"] = egress_security_rules
__props__.__dict__["freeform_tags"] = freeform_tags
__props__.__dict__["ingress_security_rules"] = ingress_security_rules
__props__.__dict__["state"] = state
__props__.__dict__["time_created"] = time_created
__props__.__dict__["vcn_id"] = vcn_id
return SecurityList(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> pulumi.Output[str]:
"""
(Updatable) The OCID of the compartment to contain the security list.
"""
return pulumi.get(self, "compartment_id")
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> pulumi.Output[Mapping[str, Any]]:
"""
(Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
"""
return pulumi.get(self, "defined_tags")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
"""
(Updatable) A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="egressSecurityRules")
def egress_security_rules(self) -> pulumi.Output[Optional[Sequence['outputs.SecurityListEgressSecurityRule']]]:
"""
(Updatable) Rules for allowing egress IP packets.
"""
return pulumi.get(self, "egress_security_rules")
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> pulumi.Output[Mapping[str, Any]]:
"""
(Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
"""
return pulumi.get(self, "freeform_tags")
@property
@pulumi.getter(name="ingressSecurityRules")
def ingress_security_rules(self) -> pulumi.Output[Optional[Sequence['outputs.SecurityListIngressSecurityRule']]]:
"""
(Updatable) Rules for allowing ingress IP packets.
"""
return pulumi.get(self, "ingress_security_rules")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
The security list's current state.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> pulumi.Output[str]:
"""
The date and time the security list was created, in the format defined by [RFC3339](https://tools.ietf.org/html/rfc3339). Example: `2016-08-25T21:10:29.600Z`
"""
return pulumi.get(self, "time_created")
@property
@pulumi.getter(name="vcnId")
def vcn_id(self) -> pulumi.Output[str]:
"""
The OCID of the VCN the security list belongs to.
"""
return pulumi.get(self, "vcn_id")
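As a hedged illustration of the `SecurityList.get` lookup documented above, the sketch below adopts an existing security list's state into a program and re-exports one of its outputs; the resource name and OCID are hypothetical placeholders, not values from the SDK.
```python
import pulumi
import pulumi_oci as oci

# `get` only reads the state of an already-provisioned security list;
# it neither creates nor modifies the underlying OCI resource.
existing = oci.core.SecurityList.get(
    "imported-security-list",
    id="ocid1.securitylist.oc1..exampleuniqueID")

# Outputs of the looked-up resource behave like any other resource outputs.
pulumi.export("securityListVcnId", existing.vcn_id)
```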
| (per-file quality-signal columns for the preceding file omitted: 40,187 bytes, 683 lines)
| next row: kmatch/mixins.py · Xavier73/kmatch (also XavierBrassoud/kmatch) @ 49d3498e987b871c49458adc602dafed22e3a8cb · Python · 1,842 bytes · MIT · 23 stars (2015-01-31 to 2021-09-24)
from .kmatch import K
class KmatchTestMixin(object):
"""
A mixin for test classes to perform kmatch validation on dictionaries
"""
def assertKmatches(self, pattern, value, suppress_key_errors=False):
"""
Assert that the value matches the kmatch pattern.
:type pattern: list
:param pattern: The kmatch pattern
:type value: dict
:param value: The dictionary to evaluate
:type suppress_key_errors: bool
:param suppress_key_errors: Suppress KeyError exceptions on filters and return False instead. False by default
:raises:
* :class:`KeyError <exceptions.KeyError>` if key from pattern does not exist in input value and the \
suppress_key_errors class variable is False
* :class:`AssertionError <exceptions.AssertionError>` if the value **does not** match the pattern
"""
assert K(pattern, suppress_key_errors=suppress_key_errors).match(value)
def assertNotKmatches(self, pattern, value, suppress_key_errors=True):
"""
Assert that the value does **not** match the kmatch pattern.
:type pattern: list
:param pattern: The kmatch pattern
:type value: dict
:param value: The dictionary to evaluate
:type suppress_key_errors: bool
:param suppress_key_errors: Suppress KeyError exceptions on filters and return False instead. True by default
:raises:
* :class:`KeyError <exceptions.KeyError>` if key from pattern does not exist in input value and the \
suppress_key_errors class variable is False
* :class:`AssertionError <exceptions.AssertionError>` if the value **does match** the pattern
"""
assert not K(pattern, suppress_key_errors=suppress_key_errors).match(value)
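A minimal sketch of how this mixin might be used from a standard `unittest.TestCase`; the `['==', key, value]` filter form is standard kmatch, but the concrete pattern and dictionaries are illustrative assumptions rather than fixtures from this project.
```python
import unittest

from kmatch.mixins import KmatchTestMixin


class UserDictTest(KmatchTestMixin, unittest.TestCase):
    def test_matches(self):
        # ['==', 'name', 'alice'] matches dicts whose 'name' key equals 'alice'.
        self.assertKmatches(['==', 'name', 'alice'], {'name': 'alice'})

    def test_does_not_match(self):
        self.assertNotKmatches(['==', 'name', 'alice'], {'name': 'bob'})


if __name__ == '__main__':
    unittest.main()
```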
| (per-file quality-signal columns for the preceding file omitted: 1,842 bytes, 46 lines)
| next row: models/layer.py · AnnLIU15/SegCovid @ e8a1ccadfbe56ddc7f1adf33225f77836436fa85 · Python · 9,525 bytes · MIT
import torch
import torch.nn as nn
import torch.nn.functional as F
'''
Some building blocks of U2Net.
'''
class REBNCONV(nn.Module):
def __init__(self, in_channels=1, out_channels=3, dirate=1):
super(REBNCONV, self).__init__()
self.conv_s1 = nn.Conv2d(
in_channels, out_channels, 3, padding=1*dirate, dilation=1*dirate)
self.bn_s1 = nn.BatchNorm2d(out_channels)
self.relu_s1 = nn.ReLU(inplace=True)
def forward(self, x):
hx = x
xout = self.relu_s1(self.bn_s1(self.conv_s1(hx)))
return xout
# Upsample tensor 'src' to the same spatial size as tensor 'tar'.
def upsample_like(src, tar):
src = F.interpolate(
src, size=tar.shape[2:], mode='bilinear', align_corners=True)
return src
### RSU-7 ###
class RSU7(nn.Module): # UNet07DRES(nn.Module):
def __init__(self, in_channels=1, mid_ch=12, out_channels=3):
super(RSU7, self).__init__()
self.rebnconvin = REBNCONV(in_channels, out_channels, dirate=1)
self.rebnconv1 = REBNCONV(out_channels, mid_ch, dirate=1)
self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)
self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1)
self.pool5 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=1)
self.rebnconv7 = REBNCONV(mid_ch, mid_ch, dirate=2)
self.rebnconv6d = REBNCONV(mid_ch*2, mid_ch, dirate=1)
self.rebnconv5d = REBNCONV(mid_ch*2, mid_ch, dirate=1)
self.rebnconv4d = REBNCONV(mid_ch*2, mid_ch, dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2, mid_ch, dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2, mid_ch, dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2, out_channels, dirate=1)
def forward(self, x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx = self.pool4(hx4)
hx5 = self.rebnconv5(hx)
hx = self.pool5(hx5)
hx6 = self.rebnconv6(hx)
hx7 = self.rebnconv7(hx6)
hx6d = self.rebnconv6d(torch.cat((hx7, hx6), 1))
hx6dup = upsample_like(hx6d, hx5)
hx5d = self.rebnconv5d(torch.cat((hx6dup, hx5), 1))
hx5dup = upsample_like(hx5d, hx4)
hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1))
hx4dup = upsample_like(hx4d, hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))
hx3dup = upsample_like(hx3d, hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
hx2dup = upsample_like(hx2d, hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))
return hx1d + hxin
### RSU-6 ###
class RSU6(nn.Module): # UNet06DRES(nn.Module):
def __init__(self, in_channels=1, mid_ch=12, out_channels=3):
super(RSU6, self).__init__()
self.rebnconvin = REBNCONV(in_channels, out_channels, dirate=1)
self.rebnconv1 = REBNCONV(out_channels, mid_ch, dirate=1)
self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)
self.pool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=1)
self.rebnconv6 = REBNCONV(mid_ch, mid_ch, dirate=2)
self.rebnconv5d = REBNCONV(mid_ch*2, mid_ch, dirate=1)
self.rebnconv4d = REBNCONV(mid_ch*2, mid_ch, dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2, mid_ch, dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2, mid_ch, dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2, out_channels, dirate=1)
def forward(self, x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx = self.pool4(hx4)
hx5 = self.rebnconv5(hx)
hx6 = self.rebnconv6(hx5)
hx5d = self.rebnconv5d(torch.cat((hx6, hx5), 1))
hx5dup = upsample_like(hx5d, hx4)
hx4d = self.rebnconv4d(torch.cat((hx5dup, hx4), 1))
hx4dup = upsample_like(hx4d, hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))
hx3dup = upsample_like(hx3d, hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
hx2dup = upsample_like(hx2d, hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))
return hx1d + hxin
### RSU-5 ###
class RSU5(nn.Module): # UNet05DRES(nn.Module):
def __init__(self, in_channels=1, mid_ch=12, out_channels=3):
super(RSU5, self).__init__()
self.rebnconvin = REBNCONV(in_channels, out_channels, dirate=1)
self.rebnconv1 = REBNCONV(out_channels, mid_ch, dirate=1)
self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
self.pool3 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=1)
self.rebnconv5 = REBNCONV(mid_ch, mid_ch, dirate=2)
self.rebnconv4d = REBNCONV(mid_ch*2, mid_ch, dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2, mid_ch, dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2, mid_ch, dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2, out_channels, dirate=1)
def forward(self, x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx5 = self.rebnconv5(hx4)
hx4d = self.rebnconv4d(torch.cat((hx5, hx4), 1))
hx4dup = upsample_like(hx4d, hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup, hx3), 1))
hx3dup = upsample_like(hx3d, hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
hx2dup = upsample_like(hx2d, hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))
return hx1d + hxin
### RSU-4 ###
class RSU4(nn.Module): # UNet04DRES(nn.Module):
def __init__(self, in_channels=1, mid_ch=12, out_channels=3):
super(RSU4, self).__init__()
self.rebnconvin = REBNCONV(in_channels, out_channels, dirate=1)
self.rebnconv1 = REBNCONV(out_channels, mid_ch, dirate=1)
self.pool1 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=1)
self.pool2 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=1)
self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=2)
self.rebnconv3d = REBNCONV(mid_ch*2, mid_ch, dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2, mid_ch, dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2, out_channels, dirate=1)
def forward(self, x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx4 = self.rebnconv4(hx3)
hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1))
hx3dup = upsample_like(hx3d, hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup, hx2), 1))
hx2dup = upsample_like(hx2d, hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup, hx1), 1))
return hx1d + hxin
### RSU-4F ###
class RSU4F(nn.Module): # UNet04FRES(nn.Module):
def __init__(self, in_channels=1, mid_ch=12, out_channels=3):
super(RSU4F, self).__init__()
self.rebnconvin = REBNCONV(in_channels, out_channels, dirate=1)
self.rebnconv1 = REBNCONV(out_channels, mid_ch, dirate=1)
self.rebnconv2 = REBNCONV(mid_ch, mid_ch, dirate=2)
self.rebnconv3 = REBNCONV(mid_ch, mid_ch, dirate=4)
self.rebnconv4 = REBNCONV(mid_ch, mid_ch, dirate=8)
self.rebnconv3d = REBNCONV(mid_ch*2, mid_ch, dirate=4)
self.rebnconv2d = REBNCONV(mid_ch*2, mid_ch, dirate=2)
self.rebnconv1d = REBNCONV(mid_ch*2, out_channels, dirate=1)
def forward(self, x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx2 = self.rebnconv2(hx1)
hx3 = self.rebnconv3(hx2)
hx4 = self.rebnconv4(hx3)
hx3d = self.rebnconv3d(torch.cat((hx4, hx3), 1))
hx2d = self.rebnconv2d(torch.cat((hx3d, hx2), 1))
hx1d = self.rebnconv1d(torch.cat((hx2d, hx1), 1))
return hx1d + hxin
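As a hedged smoke test of the blocks above, the sketch below pushes a dummy grayscale batch through `RSU7`; the 288x288 input size is an arbitrary assumption chosen so the five pooling stages halve cleanly (and `ceil_mode=True` relaxes even that requirement).
```python
import torch

if __name__ == '__main__':
    x = torch.randn(1, 1, 288, 288)                     # one 1-channel image
    block = RSU7(in_channels=1, mid_ch=12, out_channels=3)
    y = block(x)
    # The residual connection (hx1d + hxin) preserves the spatial size and
    # yields `out_channels` feature maps.
    print(y.shape)                                      # torch.Size([1, 3, 288, 288])
```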
| (per-file quality-signal columns for the preceding file omitted: 9,525 bytes, 328 lines)
| next row: unifi/cams/__init__.py · hairychris/unifi-cam-proxy @ 5302445719d85f6633ad1d2a7ffa9b99d8d12557 · Python · 120 bytes · MIT
from unifi.cams.hikvision import HikvisionCam
from unifi.cams.lorex import LorexCam
from unifi.cams.rtsp import RTSPCam
| (per-file quality-signal columns for the preceding file omitted: 120 bytes, 3 lines)
| next row: example/auto_runner/run_samgraph.py · SJTU-IPADS/fgnn-artifacts (also SJTU-IPADS/gnnlab @ 5c73564e4a9bd5deeff7eed0b923c115ccba34d7) @ c96e7ec8204d767152958dc63a764466e90424fd · Python · 42,531 bytes · Apache-2.0 · 23 stars (2022-01-25 to 2022-03-23)
from common import *
import datetime
import argparse
import time
here = os.path.abspath(os.path.dirname(__file__))
app_dir = os.path.join(here, '../samgraph/multi_gpu')
"""
if log_dir is not None, it will only parse logs
"""
def breakdown_test(log_folder=None, mock=False):
tic = time.time()
if log_folder:
log_dir = os.path.join(os.path.join(here, f'run-logs/{log_folder}'))
else:
log_dir = os.path.join(
here, f'run-logs/logs_samgraph_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')
log_table = LogTable(
num_row=12,
num_col=10
).update_col_definition(
col_id=0,
definition='epoch_time:sample_total'
).update_col_definition(
col_id=1,
definition='sample_time'
).update_col_definition(
col_id=2,
definition='get_cache_miss_index_time'
).update_col_definition(
col_id=3,
definition='enqueue_samples_time'
).update_col_definition(
col_id=4,
definition='epoch_time:copy_time'
).update_col_definition(
col_id=5,
definition='epoch_time:train_total'
).update_col_definition(
col_id=6,
definition='train_time'
).update_col_definition(
col_id=7,
definition='convert_time'
).update_col_definition(
col_id=8,
definition='cache_percentage'
).update_col_definition(
col_id=9,
definition='cache_hit_rate'
).update_row_definition(
row_id=0,
col_range=[0, 9],
app=App.gcn,
dataset=Dataset.products
).update_row_definition(
row_id=1,
col_range=[0, 9],
app=App.gcn,
dataset=Dataset.twitter
).update_row_definition(
row_id=2,
col_range=[0, 9],
app=App.gcn,
dataset=Dataset.papers100M
).update_row_definition(
row_id=3,
col_range=[0, 9],
app=App.gcn,
dataset=Dataset.uk_2006_05
).update_row_definition(
row_id=4,
col_range=[0, 9],
app=App.graphsage,
dataset=Dataset.products
).update_row_definition(
row_id=5,
col_range=[0, 9],
app=App.graphsage,
dataset=Dataset.twitter
).update_row_definition(
row_id=6,
col_range=[0, 9],
app=App.graphsage,
dataset=Dataset.papers100M
).update_row_definition(
row_id=7,
col_range=[0, 9],
app=App.graphsage,
dataset=Dataset.uk_2006_05
).update_row_definition(
row_id=8,
col_range=[0, 9],
app=App.pinsage,
dataset=Dataset.products
).update_row_definition(
row_id=9,
col_range=[0, 9],
app=App.pinsage,
dataset=Dataset.twitter
).update_row_definition(
row_id=10,
col_range=[0, 9],
app=App.pinsage,
dataset=Dataset.papers100M
).update_row_definition(
row_id=11,
col_range=[0, 9],
app=App.pinsage,
dataset=Dataset.uk_2006_05
).create()
ConfigList(
test_group_name='Samgraph breakdown test'
).select(
'app',
[App.gcn, App.graphsage, App.pinsage]
).combo(
'app',
[App.gcn, App.graphsage],
'sample_type',
['khop2']
).combo(
'app',
[App.pinsage],
'sample_type',
['random_walk']
).override(
'num_epoch',
[10]
).override(
'omp-thread-num',
[40]
).combo(
'app',
[App.gcn],
'fanout',
['5 10 15']
).combo(
'app',
[App.graphsage],
'fanout',
['25 10']
).override(
'BOOL_pipeline',
['no_pipeline']
).multi_combo(
'and',
{'app': [App.gcn], 'dataset': [Dataset.products]},
'cache_percentage',
['1.0']
).multi_combo(
'and',
{'app': [App.gcn], 'dataset': [Dataset.papers100M]},
'cache_percentage',
['0.21']
).multi_combo(
'and',
{'app': [App.gcn], 'dataset': [Dataset.twitter]},
'cache_percentage',
['0.25']
).multi_combo(
'and',
{'app': [App.gcn], 'dataset': [Dataset.uk_2006_05]},
'cache_percentage',
['0.14']
).multi_combo(
'and',
{'app': [App.graphsage], 'dataset': [Dataset.products]},
'cache_percentage',
['1.0']
).multi_combo(
'and',
{'app': [App.graphsage], 'dataset': [Dataset.papers100M]},
'cache_percentage',
['0.25']
).multi_combo(
'and',
{'app': [App.graphsage], 'dataset': [Dataset.twitter]},
'cache_percentage',
['0.32']
).multi_combo(
'and',
{'app': [App.graphsage], 'dataset': [Dataset.uk_2006_05]},
'cache_percentage',
['0.18']
).multi_combo(
'and',
{'app': [App.pinsage], 'dataset': [Dataset.products]},
'cache_percentage',
['1.0']
).multi_combo(
'and',
{'app': [App.pinsage], 'dataset': [Dataset.papers100M]},
'cache_percentage',
['0.22']
).multi_combo(
'and',
{'app': [App.pinsage], 'dataset': [Dataset.twitter]},
'cache_percentage',
['0.26']
).multi_combo(
'and',
{'app': [App.pinsage], 'dataset': [Dataset.uk_2006_05]},
'cache_percentage',
['0.13']
# ).override(
# 'BOOL_validate_configs',
# ['validate_configs']
).run(
appdir=app_dir,
logdir=log_dir,
mock=mock
).parse_logs_no_output(
logtable=log_table
)
with open(os.path.join(log_dir, 'test_result.txt'), 'w', encoding='utf8') as f:
for i in range(log_table.num_row):
f.write(
'& {{{:s} = {:s} + {:s} + {:s}}} & {{{:s}}}~~({{{:s}{:.0f}\\%}},{{{:s}{:.0f}\\%}}) & {{{:s} = {:s} + {:s}}} \\\\ % {:s}\n'.format(
log_table.data[i][0],
log_table.data[i][1],
log_table.data[i][2],
log_table.data[i][3],
log_table.data[i][4],
'' if float(log_table.data[i][8]) == 1.0 else '~~',
float(log_table.data[i][8]) * 100,
'' if float(log_table.data[i][9]) == 1.0 else '~~',
float(log_table.data[i][9]) * 100,
log_table.data[i][5],
log_table.data[i][6],
log_table.data[i][7],
os.sep.join(
os.path.normpath(log_table.row_log_reference[i][0]).split(os.sep)[-2:])
))
toc = time.time()
print('breakdown test uses {:.4f} secs'.format(toc - tic))
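Each driver in this file follows the same shape: build a `LogTable`, narrow a `ConfigList` with chained `select`/`combo`/`override` calls, `run`, then parse the logs back into the table. As a hedged illustration (the CLI wiring via `argparse` is assumed to appear further down in this file), the function above might be invoked as below; `mock=True` is simply forwarded to `ConfigList.run` and is assumed to skip real execution.
```python
# Hypothetical invocations of the driver above; the folder name is a placeholder.
breakdown_test(mock=True)                                        # config dry run
breakdown_test(log_folder='logs_samgraph_2022-01-25_13-28-51')   # re-parse existing logs
```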
def overall_test(log_folder=None, mock=False):
tic = time.time()
if log_folder:
log_dir = os.path.join(os.path.join(here, f'run-logs/{log_folder}'))
else:
log_dir = os.path.join(
here, f'run-logs/logs_samgraph_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')
log_table = LogTable(
num_row=12,
num_col=2
).update_col_definition(
col_id=0,
definition='pipeline_train_epoch_time'
).update_col_definition(
col_id=1,
definition='cache_percentage'
).update_row_definition(
row_id=0,
col_range=[0, 1],
app=App.gcn,
dataset=Dataset.products
).update_row_definition(
row_id=1,
col_range=[0, 1],
app=App.gcn,
dataset=Dataset.twitter
).update_row_definition(
row_id=2,
col_range=[0, 1],
app=App.gcn,
dataset=Dataset.papers100M
).update_row_definition(
row_id=3,
col_range=[0, 1],
app=App.gcn,
dataset=Dataset.uk_2006_05
).update_row_definition(
row_id=4,
col_range=[0, 1],
app=App.graphsage,
dataset=Dataset.products
).update_row_definition(
row_id=5,
col_range=[0, 1],
app=App.graphsage,
dataset=Dataset.twitter,
num_sample_worker=2
).update_row_definition(
row_id=6,
col_range=[0, 1],
app=App.graphsage,
dataset=Dataset.papers100M,
num_sample_worker=2
).update_row_definition(
row_id=7,
col_range=[0, 1],
app=App.graphsage,
dataset=Dataset.uk_2006_05,
num_sample_worker=1
).update_row_definition(
row_id=8,
col_range=[0, 1],
app=App.pinsage,
dataset=Dataset.products
).update_row_definition(
row_id=9,
col_range=[0, 1],
app=App.pinsage,
dataset=Dataset.twitter
).update_row_definition(
row_id=10,
col_range=[0, 1],
app=App.pinsage,
dataset=Dataset.papers100M
).update_row_definition(
row_id=11,
col_range=[0, 1],
app=App.pinsage,
dataset=Dataset.uk_2006_05
).create()
ConfigList(
test_group_name='Samgraph overall test'
).select(
'app',
[App.gcn, App.graphsage, App.pinsage]
).combo(
'app',
[App.gcn, App.graphsage],
'sample_type',
['khop2']
).combo(
'app',
[App.pinsage],
'sample_type',
['random_walk']
).override(
'num_epoch',
[10]
).override(
'omp-thread-num',
[40]
).combo(
'app',
[App.gcn],
'fanout',
['5 10 15']
).combo(
'app',
[App.graphsage],
'fanout',
['25 10']
).override(
'BOOL_pipeline',
['pipeline']
).multi_combo_multi_override(
'and',
{'app': [App.gcn], 'dataset': [Dataset.products]},
{'cache_percentage': 1.0, 'num_sample_worker': 3, 'num_train_worker': 5}
).multi_combo_multi_override(
'and',
{'app': [App.gcn], 'dataset': [Dataset.papers100M]},
{'cache_percentage': 0.20, 'num_sample_worker': 2, 'num_train_worker': 6}
).multi_combo_multi_override(
'and',
{'app': [App.gcn], 'dataset': [Dataset.twitter]},
{'cache_percentage': 0.18, 'num_sample_worker': 2, 'num_train_worker': 6}
).multi_combo_multi_override(
'and',
{'app': [App.gcn], 'dataset': [Dataset.uk_2006_05]},
{'cache_percentage': 0.11, 'num_sample_worker': 2, 'num_train_worker': 6}
).multi_combo_multi_override(
'and',
{'app': [App.graphsage], 'dataset': [Dataset.products]},
{'cache_percentage': 1.0, 'num_sample_worker': 4, 'num_train_worker': 4}
).multi_combo_multi_override_list(
'and',
{'app': [App.graphsage], 'dataset': [Dataset.papers100M]},
[
{'cache_percentage': 0.24, 'num_sample_worker': 2, 'num_train_worker': 6},
# {'cache_percentage': 0.24, 'num_sample_worker': 3, 'num_train_worker': 5}
]
).multi_combo_multi_override_list(
'and',
{'app': [App.graphsage], 'dataset': [Dataset.twitter]},
[
{'cache_percentage': 0.31, 'num_sample_worker': 2, 'num_train_worker': 6},
# {'cache_percentage': 0.31, 'num_sample_worker': 3, 'num_train_worker': 5}
]
).multi_combo_multi_override_list(
'and',
{'app': [App.graphsage], 'dataset': [Dataset.uk_2006_05]},
[
{'cache_percentage': 0.16, 'num_sample_worker': 1, 'num_train_worker': 7},
# {'cache_percentage': 0.16, 'num_sample_worker': 2, 'num_train_worker': 6},
]
).multi_combo_multi_override(
'and',
{'app': [App.pinsage], 'dataset': [Dataset.products]},
{'cache_percentage': 1.0, 'num_sample_worker': 1, 'num_train_worker': 7}
).multi_combo_multi_override(
'and',
{'app': [App.pinsage], 'dataset': [Dataset.papers100M]},
{'cache_percentage': 0.21, 'num_sample_worker': 1, 'num_train_worker': 7}
).multi_combo_multi_override(
'and',
{'app': [App.pinsage], 'dataset': [Dataset.twitter]},
{'cache_percentage': 0.23, 'num_sample_worker': 1, 'num_train_worker': 7}
).multi_combo_multi_override(
'and',
{'app': [App.pinsage], 'dataset': [Dataset.uk_2006_05]},
{'cache_percentage': 0.09, 'num_sample_worker': 1, 'num_train_worker': 7}
# ).override(
# 'BOOL_validate_configs',
# ['validate_configs']
).run(
appdir=app_dir,
logdir=log_dir,
mock=mock
).parse_logs(
logtable=log_table,
logdir=log_dir
)
toc = time.time()
print('overall test uses {:.4f} secs'.format(toc - tic))
def gcn_scalability_test(log_folder, mock):
tic = time.time()
if log_folder:
log_dir = os.path.join(os.path.join(here, f'run-logs/{log_folder}'))
else:
log_dir = os.path.join(
here, f'run-logs/logs_samgraph_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')
log_table = LogTable(
num_row=18,
num_col=4
).update_col_definition(
col_id=0,
definition='epoch_time:sample_total'
).update_col_definition(
col_id=1,
definition='epoch_time:copy_time'
).update_col_definition(
col_id=2,
definition='epoch_time:train_total'
).update_col_definition(
col_id=3,
definition='pipeline_train_epoch_time'
).update_row_definition(
row_id=0,
col_range=[0, 2],
num_sample_worker=1,
num_train_worker=1,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=1,
col_range=[0, 2],
num_sample_worker=1,
num_train_worker=2,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=2,
col_range=[0, 2],
num_sample_worker=1,
num_train_worker=3,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=3,
col_range=[0, 2],
num_sample_worker=1,
num_train_worker=4,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=4,
col_range=[0, 2],
num_sample_worker=1,
num_train_worker=5,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=5,
col_range=[0, 2],
num_sample_worker=1,
num_train_worker=6,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=6,
col_range=[0, 2],
num_sample_worker=1,
num_train_worker=7,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=7,
col_range=[0, 2],
num_sample_worker=2,
num_train_worker=1,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=8,
col_range=[0, 2],
num_sample_worker=2,
num_train_worker=2,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=9,
col_range=[0, 2],
num_sample_worker=2,
num_train_worker=3,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=10,
col_range=[0, 2],
num_sample_worker=2,
num_train_worker=4,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=11,
col_range=[0, 2],
num_sample_worker=2,
num_train_worker=5,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=12,
col_range=[0, 2],
num_sample_worker=2,
num_train_worker=6,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=13,
col_range=[0, 2],
num_sample_worker=3,
num_train_worker=1,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=14,
col_range=[0, 2],
num_sample_worker=3,
num_train_worker=2,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=15,
col_range=[0, 2],
num_sample_worker=3,
num_train_worker=3,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=16,
col_range=[0, 2],
num_sample_worker=3,
num_train_worker=4,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=17,
col_range=[0, 2],
num_sample_worker=3,
num_train_worker=5,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=0,
col_range=[3, 3],
num_sample_worker=1,
num_train_worker=1,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=1,
col_range=[3, 3],
num_sample_worker=1,
num_train_worker=2,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=2,
col_range=[3, 3],
num_sample_worker=1,
num_train_worker=3,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=3,
col_range=[3, 3],
num_sample_worker=1,
num_train_worker=4,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=4,
col_range=[3, 3],
num_sample_worker=1,
num_train_worker=5,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=5,
col_range=[3, 3],
num_sample_worker=1,
num_train_worker=6,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=6,
col_range=[3, 3],
num_sample_worker=1,
num_train_worker=7,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=7,
col_range=[3, 3],
num_sample_worker=2,
num_train_worker=1,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=8,
col_range=[3, 3],
num_sample_worker=2,
num_train_worker=2,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=9,
col_range=[3, 3],
num_sample_worker=2,
num_train_worker=3,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=10,
col_range=[3, 3],
num_sample_worker=2,
num_train_worker=4,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=11,
col_range=[3, 3],
num_sample_worker=2,
num_train_worker=5,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=12,
col_range=[3, 3],
num_sample_worker=2,
num_train_worker=6,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=13,
col_range=[3, 3],
num_sample_worker=3,
num_train_worker=1,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=14,
col_range=[3, 3],
num_sample_worker=3,
num_train_worker=2,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=15,
col_range=[3, 3],
num_sample_worker=3,
num_train_worker=3,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=16,
col_range=[3, 3],
num_sample_worker=3,
num_train_worker=4,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=17,
col_range=[3, 3],
num_sample_worker=3,
num_train_worker=5,
BOOL_pipeline='pipeline'
).create()
ConfigList(
test_group_name='Samgraph GCN scalability test'
).select(
'app',
[App.gcn]
).select(
'dataset',
[Dataset.papers100M]
).override(
'sample_type',
['khop2']
).override(
'num_epoch',
[10]
).override(
'omp-thread-num',
[40]
).combo(
'app',
[App.gcn],
'fanout',
['5 10 15']
).multi_combo_multi_override_list(
'and',
{'app' : [App.gcn]},
[
{'num_sample_worker': 1, 'num_train_worker': 1, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 2, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 3, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 4, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 5, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 6, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 7, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 2, 'num_train_worker': 1, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 2, 'num_train_worker': 2, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 2, 'num_train_worker': 3, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 2, 'num_train_worker': 4, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 2, 'num_train_worker': 5, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 2, 'num_train_worker': 6, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 3, 'num_train_worker': 1, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 3, 'num_train_worker': 2, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.19},
{'num_sample_worker': 3, 'num_train_worker': 3, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.19},
{'num_sample_worker': 3, 'num_train_worker': 4, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.19},
{'num_sample_worker': 3, 'num_train_worker': 5, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 1, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 2, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 3, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 4, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 5, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 6, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 7, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 2, 'num_train_worker': 1, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 2, 'num_train_worker': 2, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 2, 'num_train_worker': 3, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 2, 'num_train_worker': 4, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 2, 'num_train_worker': 5, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 2, 'num_train_worker': 6, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 3, 'num_train_worker': 1, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 3, 'num_train_worker': 2, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.19},
{'num_sample_worker': 3, 'num_train_worker': 3, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.19},
{'num_sample_worker': 3, 'num_train_worker': 4, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.19},
{'num_sample_worker': 3, 'num_train_worker': 5, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.20},
]
).run(
appdir=app_dir,
logdir=log_dir,
mock=mock
).parse_logs(
logtable=log_table,
logdir=log_dir,
left_wrap='',
right_wrap='',
sep='\t'
)
toc = time.time()
print('Samgraph GCN scalability test uses {:.4f} secs'.format(toc - tic))
def gcn_twitter_scalability_test(log_folder, mock):
tic = time.time()
if log_folder:
log_dir = os.path.join(os.path.join(here, f'run-logs/{log_folder}'))
else:
log_dir = os.path.join(
here, f'run-logs/logs_samgraph_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')
log_table = LogTable(
num_row=18,
num_col=1
).update_col_definition(
col_id=0,
definition='pipeline_train_epoch_time'
).update_row_definition(
row_id=0,
col_range=[0, 0],
num_sample_worker=1,
num_train_worker=1
).update_row_definition(
row_id=1,
col_range=[0, 0],
num_sample_worker=1,
num_train_worker=2
).update_row_definition(
row_id=2,
col_range=[0, 0],
num_sample_worker=1,
num_train_worker=3
).update_row_definition(
row_id=3,
col_range=[0, 0],
num_sample_worker=1,
num_train_worker=4
).update_row_definition(
row_id=4,
col_range=[0, 0],
num_sample_worker=1,
num_train_worker=5
).update_row_definition(
row_id=5,
col_range=[0, 0],
num_sample_worker=1,
num_train_worker=6
).update_row_definition(
row_id=6,
col_range=[0, 0],
num_sample_worker=1,
num_train_worker=7
).update_row_definition(
row_id=7,
col_range=[0, 0],
num_sample_worker=2,
num_train_worker=1
).update_row_definition(
row_id=8,
col_range=[0, 0],
num_sample_worker=2,
num_train_worker=2
).update_row_definition(
row_id=9,
col_range=[0, 0],
num_sample_worker=2,
num_train_worker=3
).update_row_definition(
row_id=10,
col_range=[0, 0],
num_sample_worker=2,
num_train_worker=4
).update_row_definition(
row_id=11,
col_range=[0, 0],
num_sample_worker=2,
num_train_worker=5
).update_row_definition(
row_id=12,
col_range=[0, 0],
num_sample_worker=2,
num_train_worker=6
).update_row_definition(
row_id=13,
col_range=[0, 0],
num_sample_worker=3,
num_train_worker=1
).update_row_definition(
row_id=14,
col_range=[0, 0],
num_sample_worker=3,
num_train_worker=2
).update_row_definition(
row_id=15,
col_range=[0, 0],
num_sample_worker=3,
num_train_worker=3
).update_row_definition(
row_id=16,
col_range=[0, 0],
num_sample_worker=3,
num_train_worker=4
).update_row_definition(
row_id=17,
col_range=[0, 0],
num_sample_worker=3,
num_train_worker=5
).create()
ConfigList(
test_group_name='Samgraph GCN Twitter scalability test'
).select(
'app',
[App.gcn]
).select(
'dataset',
[Dataset.twitter]
).override(
'sample_type',
['khop2']
).override(
'num_epoch',
[10]
).override(
'omp-thread-num',
[40]
).combo(
'app',
[App.gcn],
'fanout',
['5 10 15']
).multi_combo_multi_override_list(
'and',
{'app' : [App.gcn]},
[
{'num_sample_worker': 1, 'num_train_worker': 1, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 2, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 3, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 4, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 5, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 6, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 1, 'num_train_worker': 7, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 2, 'num_train_worker': 1, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 2, 'num_train_worker': 2, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 2, 'num_train_worker': 3, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.18},
{'num_sample_worker': 2, 'num_train_worker': 4, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 2, 'num_train_worker': 5, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.19},
{'num_sample_worker': 2, 'num_train_worker': 6, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.18},
{'num_sample_worker': 3, 'num_train_worker': 1, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 3, 'num_train_worker': 2, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 3, 'num_train_worker': 3, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 3, 'num_train_worker': 4, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.20},
{'num_sample_worker': 3, 'num_train_worker': 5, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.19},
]
).run(
appdir=app_dir,
logdir=log_dir,
mock=mock
).parse_logs(
logtable=log_table,
logdir=log_dir,
left_wrap='',
right_wrap='',
sep='\t'
)
toc = time.time()
print('Samgraph GCN Twitter scalability test uses {:.4f} secs'.format(toc - tic))
def pinsage_scalability_test(log_folder, mock):
tic = time.time()
if log_folder:
log_dir = os.path.join(os.path.join(here, f'run-logs/{log_folder}'))
else:
log_dir = os.path.join(
here, f'run-logs/logs_samgraph_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')
log_table = LogTable(
num_row=18,
num_col=4
).update_col_definition(
col_id=0,
definition='epoch_time:sample_total'
).update_col_definition(
col_id=1,
definition='epoch_time:copy_time'
).update_col_definition(
col_id=2,
definition='epoch_time:train_total'
).update_col_definition(
col_id=3,
definition='pipeline_train_epoch_time'
).update_row_definition(
row_id=0,
col_range=[0, 2],
num_sample_worker=1,
num_train_worker=1,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=1,
col_range=[0, 2],
num_sample_worker=1,
num_train_worker=2,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=2,
col_range=[0, 2],
num_sample_worker=1,
num_train_worker=3,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=3,
col_range=[0, 2],
num_sample_worker=1,
num_train_worker=4,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=4,
col_range=[0, 2],
num_sample_worker=1,
num_train_worker=5,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=5,
col_range=[0, 2],
num_sample_worker=1,
num_train_worker=6,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=6,
col_range=[0, 2],
num_sample_worker=1,
num_train_worker=7,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=7,
col_range=[0, 2],
num_sample_worker=2,
num_train_worker=1,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=8,
col_range=[0, 2],
num_sample_worker=2,
num_train_worker=2,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=9,
col_range=[0, 2],
num_sample_worker=2,
num_train_worker=3,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=10,
col_range=[0, 2],
num_sample_worker=2,
num_train_worker=4,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=11,
col_range=[0, 2],
num_sample_worker=2,
num_train_worker=5,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=12,
col_range=[0, 2],
num_sample_worker=2,
num_train_worker=6,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=13,
col_range=[0, 2],
num_sample_worker=3,
num_train_worker=1,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=14,
col_range=[0, 2],
num_sample_worker=3,
num_train_worker=2,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=15,
col_range=[0, 2],
num_sample_worker=3,
num_train_worker=3,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=16,
col_range=[0, 2],
num_sample_worker=3,
num_train_worker=4,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=17,
col_range=[0, 2],
num_sample_worker=3,
num_train_worker=5,
BOOL_pipeline='no_pipeline'
).update_row_definition(
row_id=0,
col_range=[3, 3],
num_sample_worker=1,
num_train_worker=1,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=1,
col_range=[3, 3],
num_sample_worker=1,
num_train_worker=2,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=2,
col_range=[3, 3],
num_sample_worker=1,
num_train_worker=3,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=3,
col_range=[3, 3],
num_sample_worker=1,
num_train_worker=4,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=4,
col_range=[3, 3],
num_sample_worker=1,
num_train_worker=5,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=5,
col_range=[3, 3],
num_sample_worker=1,
num_train_worker=6,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=6,
col_range=[3, 3],
num_sample_worker=1,
num_train_worker=7,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=7,
col_range=[3, 3],
num_sample_worker=2,
num_train_worker=1,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=8,
col_range=[3, 3],
num_sample_worker=2,
num_train_worker=2,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=9,
col_range=[3, 3],
num_sample_worker=2,
num_train_worker=3,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=10,
col_range=[3, 3],
num_sample_worker=2,
num_train_worker=4,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=11,
col_range=[3, 3],
num_sample_worker=2,
num_train_worker=5,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=12,
col_range=[3, 3],
num_sample_worker=2,
num_train_worker=6,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=13,
col_range=[3, 3],
num_sample_worker=3,
num_train_worker=1,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=14,
col_range=[3, 3],
num_sample_worker=3,
num_train_worker=2,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=15,
col_range=[3, 3],
num_sample_worker=3,
num_train_worker=3,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=16,
col_range=[3, 3],
num_sample_worker=3,
num_train_worker=4,
BOOL_pipeline='pipeline'
).update_row_definition(
row_id=17,
col_range=[3, 3],
num_sample_worker=3,
num_train_worker=5,
BOOL_pipeline='pipeline'
).create()
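# A sketch of what the table above encodes (inferred from the column and row
# definitions): 18 rows, one per (num_sample_worker, num_train_worker)
# combination swept below; columns 0-2 hold the per-epoch sample, copy, and
# train times of the non-pipelined runs, and column 3 the end-to-end epoch
# time of the pipelined runs.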
ConfigList(
test_group_name='Samgraph PinSAGE scalability test'
).select(
'app',
[App.pinsage]
).select(
'dataset',
[Dataset.papers100M]
).override(
'sample_type',
['random_walk']
).override(
'num_epoch',
[10]
).override(
'omp-thread-num',
[40]
).multi_combo_multi_override_list(
'and',
{'app' : [App.pinsage]},
[
{'num_sample_worker': 1, 'num_train_worker': 1, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 1, 'num_train_worker': 2, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 1, 'num_train_worker': 3, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 1, 'num_train_worker': 4, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 1, 'num_train_worker': 5, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 1, 'num_train_worker': 6, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 1, 'num_train_worker': 7, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 2, 'num_train_worker': 1, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 2, 'num_train_worker': 2, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 2, 'num_train_worker': 3, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 2, 'num_train_worker': 4, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 2, 'num_train_worker': 5, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 2, 'num_train_worker': 6, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 3, 'num_train_worker': 1, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 3, 'num_train_worker': 2, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 3, 'num_train_worker': 3, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 3, 'num_train_worker': 4, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 3, 'num_train_worker': 5, 'BOOL_pipeline': 'pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 1, 'num_train_worker': 1, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 1, 'num_train_worker': 2, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 1, 'num_train_worker': 3, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 1, 'num_train_worker': 4, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 1, 'num_train_worker': 5, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 1, 'num_train_worker': 6, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 1, 'num_train_worker': 7, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 2, 'num_train_worker': 1, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 2, 'num_train_worker': 2, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 2, 'num_train_worker': 3, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 2, 'num_train_worker': 4, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 2, 'num_train_worker': 5, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 2, 'num_train_worker': 6, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 3, 'num_train_worker': 1, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 3, 'num_train_worker': 2, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 3, 'num_train_worker': 3, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 3, 'num_train_worker': 4, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.21},
{'num_sample_worker': 3, 'num_train_worker': 5, 'BOOL_pipeline': 'no_pipeline', 'cache_percentage': 0.21},
]
).run(
appdir=app_dir,
logdir=log_dir,
mock=mock
).parse_logs(
logtable=log_table,
logdir=log_dir,
left_wrap='',
right_wrap='',
sep='\t'
)
toc = time.time()
print('Samgraph PinSAGE scalability test took {:.4f} secs'.format(toc - tic))
if __name__ == '__main__':
argparser = argparse.ArgumentParser("DGL runner")
argparser.add_argument('-l', '--log-folder', default=None)
argparser.add_argument('-m', '--mock', action='store_true', default=False)
args = argparser.parse_args()
breakdown_test(args.log_folder, args.mock)
# overall_test(args.log_folder, args.mock)
# gcn_scalability_test(args.log_folder, args.mock)
# gcn_twitter_scalability_test(args.log_folder, args.mock)
# pinsage_scalability_test(args.log_folder, args.mock)
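# Example invocation (the script filename here is hypothetical, and --mock is
# presumed to do a dry run of the generated commands):
#   python samgraph_runner.py -l my-log-folder -m
# breakdown_test runs by default; uncomment one of the calls above to run a
# different test group.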
| 33.835322
| 144
| 0.584256
| 5,338
| 42,531
| 4.286437
| 0.033908
| 0.077881
| 0.129802
| 0.109611
| 0.957476
| 0.95363
| 0.946025
| 0.931865
| 0.916743
| 0.868931
| 0
| 0.043919
| 0.27246
| 42,531
| 1,256
| 145
| 33.862261
| 0.695537
| 0.013096
| 0
| 0.851667
| 0
| 0.005
| 0.242548
| 0.019594
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004167
| false
| 0
| 0.003333
| 0
| 0.0075
| 0.004167
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
47c57a8c12fe001c0bf9258c584ac3cb7c17f51a
| 8,565
|
py
|
Python
|
tests/parsers/can_frame_parser_test.py
|
sceaj/track_logger_postprocessor
|
120bd68d9bf6354c1d3a97de6178df040f3ab7b3
|
[
"MIT"
] | null | null | null |
tests/parsers/can_frame_parser_test.py
|
sceaj/track_logger_postprocessor
|
120bd68d9bf6354c1d3a97de6178df040f3ab7b3
|
[
"MIT"
] | null | null | null |
tests/parsers/can_frame_parser_test.py
|
sceaj/track_logger_postprocessor
|
120bd68d9bf6354c1d3a97de6178df040f3ab7b3
|
[
"MIT"
] | null | null | null |
'''
Created on May 24, 2019
@author: jeff
'''
import unittest
from parsers.can_frame_parser import CanFrame242Extractor, CanFrame245Extractor, CanFrame246Extractor, CanFrame24AExtractor
from parsers.can_frame_parser import CanFrame441Extractor, CanFrame44BExtractor, CanFrameParser
from converter.data_state import DataState
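# Judging by the fixtures below, each logged record is a comma-separated line
# consisting of a "$CNDRV" sentence tag, a timestamp in seconds, the CAN frame
# ID in hex, and eight hex-encoded data bytes.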
class CanFrameParserTest(unittest.TestCase):
test_state = DataState()
test_parser = CanFrameParser(test_state)
def testFrame242_1(self):
test_fields = ["$CNDRV","476.72","242","01","00","00","00","58","00","65","00"]
test_extractor = CanFrame242Extractor()
test_extractor.extractData(test_fields, CanFrameParserTest.test_state)
self.assertEqual(0, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.Clutch)), 'Clutch Pedal was not parsed correctly.')
self.assertEqual(0, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.RPM)), 'Engine RPM was not parsed correctly.')
self.assertEqual(0, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.ECU_Throttle)), 'ECU commanded throttle was not parsed correctly.')
def testFrame242_2(self):
test_fields = ["$CNDRV","476.72","242","09","00","C8","50","58","C3","65","00"]
test_extractor = CanFrame242Extractor()
test_extractor.extractData(test_fields, CanFrameParserTest.test_state)
self.assertEqual(1, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.Clutch)), 'Clutch Pedal was not parsed correctly.')
self.assertEqual(5170, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.RPM)), 'Engine RPM was not parsed correctly.')
self.assertAlmostEqual(76, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.ECU_Throttle)), 2, 'ECU commanded throttle was not parsed correctly.')
def testFrame245(self):
test_fields = ["$CNDRV","476.72","245","01","6C","02","00","58","00","65","00"]
test_extractor = CanFrame245Extractor()
test_extractor.extractData(test_fields, CanFrameParserTest.test_state)
self.assertEqual(96, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.Coolant_Temperature)), 'Coolant temperature was not parsed correctly.')
self.assertEqual(1, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.Brake)), 'Brake pedal was not parsed correctly.')
def testFrame246(self):
test_fields = ["$CNDRV","476.72","246","0B","00","C8","E1","58","C3","65","00"]
test_extractor = CanFrame246Extractor()
test_extractor.extractData(test_fields, CanFrameParserTest.test_state)
self.assertEqual(3, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.Gear)), 'Gear indicator was not parsed correctly.')
self.assertAlmostEqual(88, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.Throttle)), 2, 'Throttle was not parsed correctly.')
def testFrame24A(self):
test_fields = ["$CNDRV","476.72","24A","0B","10","C8","10","0E","10","D2","10"]
test_extractor = CanFrame24AExtractor()
test_extractor.extractData(test_fields, CanFrameParserTest.test_state)
self.assertAlmostEqual(41.07, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.LF_KPH)), 2, 'LF wheel speed was not parsed correctly.')
self.assertAlmostEqual(42.96, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.RF_KPH)), 2, 'RF wheel speed was not parsed correctly.')
self.assertAlmostEqual(41.10, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.LR_KPH)), 2, 'LR wheel speed was not parsed correctly.')
self.assertAlmostEqual(41.12, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.RR_KPH)), 2, 'RR wheel speed was not parsed correctly.')
def testFrame441(self):
test_fields = ["$CNDRV","476.72","441","0B","10","C8","10","0E","67","82","10"]
test_extractor = CanFrame441Extractor()
test_extractor.extractData(test_fields, CanFrameParserTest.test_state)
self.assertAlmostEqual(89.33, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.Oil_Temperature)), 2, 'Oil temperature was not parsed correctly.')
self.assertEqual(325, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.Oil_Pressure)), 'Oil pressure was not parsed correctly.')
def testFrame44B(self):
test_fields = ["$CNDRV","476.72","44B","2B","10","C8","10","0E","10","D2","10"]
test_extractor = CanFrame44BExtractor()
test_extractor.extractData(test_fields, CanFrameParserTest.test_state)
self.assertEqual(43, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.Brake_Pressure)), 'Brake pressure was not parsed correctly.')
def testParserFrame242(self):
test_line = "$CNDRV,477.72,242,09,00,C8,50,58,C3,65,00"
CanFrameParserTest.test_parser.parse(test_line)
self.assertEqual(477.72, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.Time)), 'Time was not parsed correctly.')
self.assertEqual(1, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.Clutch)), 'Clutch Pedal was not parsed correctly.')
self.assertEqual(5170, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.RPM)), 'Engine RPM was not parsed correctly.')
self.assertAlmostEqual(76, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.ECU_Throttle)), 2, 'ECU commanded throttle was not parsed correctly.')
def testParserFrame245(self):
test_line = "$CNDRV,477.72,245,09,6E,C8,50,58,C3,65,00"
CanFrameParserTest.test_parser.parse(test_line)
self.assertAlmostEqual(477.72, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.Time)), 2, 'Time was not parsed correctly.')
self.assertAlmostEqual(98.67, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.Coolant_Temperature)), 2, 'Coolant temperature was not parsed correctly.')
self.assertEqual(0, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.Brake)), 'Brake pedal was not parsed correctly.')
def testParserFrame246(self):
test_line = "$CNDRV,478.50,246,04,00,C8,FA,58,C3,65,00"
CanFrameParserTest.test_parser.parse(test_line)
self.assertEqual(478.50, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.Time)), 'Time was not parsed correctly.')
self.assertEqual(4, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.Gear)), 'Gear indicator was not parsed correctly.')
self.assertAlmostEqual(98, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.Throttle)), 2, 'Throttle was not parsed correctly.')
def testParserFrame24A(self):
test_line = "$CNDRV,480.17,24A,0B,20,C8,20,F7,20,02,21"
CanFrameParserTest.test_parser.parse(test_line)
self.assertEqual(480.17, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.Time)), 'Time was not parsed correctly.')
self.assertAlmostEqual(82.030, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.LF_KPH)), 3, 'LF wheel speed was not parsed correctly.')
self.assertAlmostEqual(83.920, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.RF_KPH)), 3, 'RF wheel speed was not parsed correctly.')
self.assertAlmostEqual(84.390, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.LR_KPH)), 3, 'LR wheel speed was not parsed correctly.')
self.assertAlmostEqual(84.810, self.test_state.get_data_item(DataState.get_data_name_at_idx(DataState.names.RR_KPH)), 3, 'RR wheel speed was not parsed correctly.')
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testFrame242']
unittest.main()
| 75.131579
| 199
| 0.727496
| 1,180
| 8,565
| 5.022034
| 0.123729
| 0.075599
| 0.070199
| 0.086399
| 0.852852
| 0.850827
| 0.808977
| 0.764428
| 0.736416
| 0.663179
| 0
| 0.057457
| 0.150613
| 8,565
| 114
| 200
| 75.131579
| 0.757113
| 0.010041
| 0
| 0.326087
| 0
| 0.043478
| 0.195231
| 0.019358
| 0
| 0
| 0
| 0
| 0.347826
| 1
| 0.119565
| false
| 0.119565
| 0.043478
| 0
| 0.195652
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
47fb333bde97675a5b1cf517310ef59892da40db
| 16,814
|
py
|
Python
|
Lib/site-packages/numarray/array_protocol.py
|
raychorn/svn_Python-2.5.1
|
425005b1b489ba44ec0bb989e077297e8953d9be
|
[
"PSF-2.0"
] | null | null | null |
Lib/site-packages/numarray/array_protocol.py
|
raychorn/svn_Python-2.5.1
|
425005b1b489ba44ec0bb989e077297e8953d9be
|
[
"PSF-2.0"
] | null | null | null |
Lib/site-packages/numarray/array_protocol.py
|
raychorn/svn_Python-2.5.1
|
425005b1b489ba44ec0bb989e077297e8953d9be
|
[
"PSF-2.0"
] | null | null | null |
"""array_protocol contains self-tests for the scipy newcore array protocol.
Currently array_protocol tests numarray<->Numeric exchanges. If Numeric fails
to import, array_protocol runs no tests and returns a (0,0) doctest result
tuple.
"""
## doctests for numarray-->Numeric conversions
import sys
import numarray
try:
import Numeric
except ImportError:
def test_Numeric():
"""Numeric not installed dummy selftest"""
pass
else:
def test_Numeric():
"""
===========================================================================
Numeric interoperability
Test all of the Numeric typecodes with the exception of 'i' which doesn't
"round-trip" consistently for both 32 and 64-bit systems.
>>> typecodes = ['b', '1', 's', 'l', 'f', 'd', 'F', 'D']
Checking numarray-->Numeric conversion. Non-strided values. Data copy.
>>> for typecode in typecodes:
... na = numarray.array([1,2,3], typecode)
... num = Numeric.zeros(shape=2, typecode=typecode)
... num = Numeric.array(na, copy=1)
... num2 = Numeric.array([1,2,3], typecode)
... print typecode, num == num2, int(num.typecode() == num2.typecode())
b [1 1 1] 1
1 [1 1 1] 1
s [1 1 1] 1
l [1 1 1] 1
f [1 1 1] 1
d [1 1 1] 1
F [1 1 1] 1
D [1 1 1] 1
Checking numarray-->Numeric conversion. Non-strided values. No data copy.
>>> for typecode in typecodes:
... na = numarray.array([1,2,3], typecode)
... num = Numeric.zeros(shape=2, typecode=typecode)
... num = Numeric.array(na, copy=0)
... num2 = Numeric.array([1,2,3], typecode)
... print typecode, num == num2, int(num.typecode() == num2.typecode())
b [1 1 1] 1
1 [1 1 1] 1
s [1 1 1] 1
l [1 1 1] 1
f [1 1 1] 1
d [1 1 1] 1
F [1 1 1] 1
D [1 1 1] 1
Checking numarray-->Numeric conversion. Strided values. No data copy.
>>> for typecode in typecodes:
... na = numarray.array([1,2,3], typecode)
... num = Numeric.zeros(shape=2, typecode=typecode)
... num = Numeric.array(na[::2], copy=0)
... num2 = Numeric.array([1,3], typecode)
... print typecode, num == num2, int(num.typecode() == num2.typecode())
b [1 1] 1
1 [1 1] 1
s [1 1] 1
l [1 1] 1
f [1 1] 1
d [1 1] 1
F [1 1] 1
D [1 1] 1
Checking numarray-->Numeric conversion. Strided values. Data copy.
>>> for typecode in typecodes:
... na = numarray.array([1,2,3], typecode)
... num = Numeric.zeros(shape=2, typecode=typecode)
... num = Numeric.array(na[::2], copy=1)
... num2 = Numeric.array([1,3], typecode)
... print typecode, num == num2, int(num.typecode() == num2.typecode())
b [1 1] 1
1 [1 1] 1
s [1 1] 1
l [1 1] 1
f [1 1] 1
d [1 1] 1
F [1 1] 1
D [1 1] 1
Checking numarray-->Numeric conversion. Offset values. Data copy.
>>> for typecode in typecodes:
... na = numarray.array([1,2,3], typecode)
... num = Numeric.zeros(shape=2, typecode=typecode)
... num = Numeric.array(na[1:], copy=1)
... num2 = Numeric.array([2,3], typecode)
... print typecode, num == num2, int(num.typecode() == num2.typecode())
b [1 1] 1
1 [1 1] 1
s [1 1] 1
l [1 1] 1
f [1 1] 1
d [1 1] 1
F [1 1] 1
D [1 1] 1
Checking numarray-->Numeric conversion. Offset values. No data copy.
>>> for typecode in typecodes:
... na = numarray.array([1,2,3], typecode)
... num = Numeric.zeros(shape=2, typecode=typecode)
... num = Numeric.array(na[1:], copy=0)
... num2 = Numeric.array([2,3], typecode)
... print typecode, num == num2, int(num.typecode() == num2.typecode())
b [1 1] 1
1 [1 1] 1
s [1 1] 1
l [1 1] 1
f [1 1] 1
d [1 1] 1
F [1 1] 1
D [1 1] 1
>>> typecodes.append('i')
Checking Numeric<--numarray assignment. Non-strided values. Data copy.
>>> for typecode in typecodes:
... na = numarray.array([1,2,3], typecode=typecode)
... num = Numeric.zeros(shape=3, typecode=typecode)
... num[...] = na
... num2 = Numeric.array([1,2,3], typecode)
... print typecode, num == num2, int(num.typecode() == num2.typecode())
b [1 1 1] 1
1 [1 1 1] 1
s [1 1 1] 1
l [1 1 1] 1
f [1 1 1] 1
d [1 1 1] 1
F [1 1 1] 1
D [1 1 1] 1
i [1 1 1] 1
Checking Numeric<--numarray assignment. Strided values. Data copy.
>>> for typecode in typecodes:
... na = numarray.array([1,2,3], typecode)
... num = Numeric.zeros(shape=2, typecode=typecode)
... num[...] = na[::2]
... num2 = Numeric.array([1,3], typecode)
... print typecode, num == num2, int(num.typecode() == num2.typecode())
b [1 1] 1
1 [1 1] 1
s [1 1] 1
l [1 1] 1
f [1 1] 1
d [1 1] 1
F [1 1] 1
D [1 1] 1
i [1 1] 1
Checking numarray<--Numeric assignment. Non-strided values. Data copy.
>>> for typecode in typecodes:
... num = Numeric.array([1,2,3], typecode)
... na = numarray.zeros(shape=3, typecode=typecode)
... na[...] = num
... nb = numarray.array([1,2,3], typecode)
... print typecode, na == nb, int(na.type() == nb.type())
b [1 1 1] 1
1 [1 1 1] 1
s [1 1 1] 1
l [1 1 1] 1
f [1 1 1] 1
d [1 1 1] 1
F [1 1 1] 1
D [1 1 1] 1
i [1 1 1] 1
Checking numarray<--Numeric assignment. Strided values. Data copy.
>>> for typecode in typecodes:
... num = Numeric.array([1,2,3], typecode)
... na = numarray.zeros(shape=2, typecode=typecode)
... na[...] = num[::2]
... nb = numarray.array([1,3], typecode)
... print typecode, na == nb, int(na.type() == nb.type())
b [1 1] 1
1 [1 1] 1
s [1 1] 1
l [1 1] 1
f [1 1] 1
d [1 1] 1
F [1 1] 1
D [1 1] 1
i [1 1] 1
Checking Numeric-->numarray conversion. Non-strided values. Data copy.
>>> for typecode in typecodes:
... num = Numeric.array([1,2,3], typecode)
... na = numarray.zeros(shape=2, typecode=typecode)
... na = numarray.array(num, copy=1)
... nb = numarray.array([1,2,3], typecode)
... print typecode, na == nb, int(na.type() == nb.type())
b [1 1 1] 1
1 [1 1 1] 1
s [1 1 1] 1
l [1 1 1] 1
f [1 1 1] 1
d [1 1 1] 1
F [1 1 1] 1
D [1 1 1] 1
i [1 1 1] 1
Checking Numeric-->numarray conversion. Non-strided values. No data copy.
>>> for typecode in typecodes:
... num = Numeric.array([1,2,3], typecode)
... na = numarray.zeros(shape=2, typecode=typecode)
... na = numarray.array(num, copy=0)
... nb = numarray.array([1,2,3], typecode)
... print typecode, na == nb, int(na.type() == nb.type())
b [1 1 1] 1
1 [1 1 1] 1
s [1 1 1] 1
l [1 1 1] 1
f [1 1 1] 1
d [1 1 1] 1
F [1 1 1] 1
D [1 1 1] 1
i [1 1 1] 1
Checking Numeric-->numarray conversion. Strided values. Data copy.
>>> for typecode in typecodes:
... num = Numeric.array([1,2,3], typecode)
... na = numarray.zeros(shape=2, typecode=typecode)
... na = numarray.array(num[::2], copy=1)
... nb = numarray.array([1,3], typecode)
... print typecode, na == nb, int(na.type() == nb.type())
b [1 1] 1
1 [1 1] 1
s [1 1] 1
l [1 1] 1
f [1 1] 1
d [1 1] 1
F [1 1] 1
D [1 1] 1
i [1 1] 1
Checking Numeric-->numarray conversion. Strided values. No data copy.
>>> for typecode in typecodes:
... num = Numeric.array([1,2,3], typecode)
... na = numarray.zeros(shape=2, typecode=typecode)
... na = numarray.array(num[::2], copy=0)
... nb = numarray.array([1,3], typecode)
... print typecode, na == nb, int(na.type() == nb.type())
b [1 1] 1
1 [1 1] 1
s [1 1] 1
l [1 1] 1
f [1 1] 1
d [1 1] 1
F [1 1] 1
D [1 1] 1
i [1 1] 1
Checking Numeric-->numarray conversion. Offset values. Data copy.
>>> for typecode in typecodes:
... num = Numeric.array([1,2,3], typecode)
... na = numarray.zeros(shape=2, typecode=typecode)
... na = numarray.array(num[1:], copy=1)
... nb = numarray.array([2,3], typecode)
... print typecode, na == nb, int(na.type() == nb.type())
b [1 1] 1
1 [1 1] 1
s [1 1] 1
l [1 1] 1
f [1 1] 1
d [1 1] 1
F [1 1] 1
D [1 1] 1
i [1 1] 1
Checking Numeric-->numarray conversion. Offset values. No data copy.
>>> for typecode in typecodes:
... num = Numeric.array([1,2,3], typecode)
... na = numarray.zeros(shape=2, typecode=typecode)
... na = numarray.array(num[1:], copy=0)
... nb = numarray.array([2,3], typecode)
... print typecode, na == nb, int(na.type() == nb.type())
b [1 1] 1
1 [1 1] 1
s [1 1] 1
l [1 1] 1
f [1 1] 1
d [1 1] 1
F [1 1] 1
D [1 1] 1
i [1 1] 1
"""
try:
import numpy
except ImportError:
def test_numpy():
"""numpy not installed dummy selftest"""
pass
else:
def test_numpy():
"""
=============================================================================
numpy interoperability
>>> dtypes = ['b','B','h', 'H', 'i','I', 'q','Q', 'f', 'd','F', 'D' ]
Checking numpy<--numarray assignment. Non-strided values. Data copy.
>>> for dtype in dtypes:
... na = numarray.array([1,2,3], dtype=dtype)
... num = numpy.zeros(shape=3, dtype=dtype)
... num[...] = na
... num2 = numpy.array([1,2,3], dtype=dtype)
... print dtype, num == num2, int(num.dtype == num2.dtype)
b [True True True] 1
B [True True True] 1
h [True True True] 1
H [True True True] 1
i [True True True] 1
I [True True True] 1
q [True True True] 1
Q [True True True] 1
f [True True True] 1
d [True True True] 1
F [True True True] 1
D [True True True] 1
Checking numpy<--numarray assignment. Strided values. Data copy.
>>> for dtype in dtypes:
... na = numarray.array([1,2,3], dtype=dtype)
... num = numpy.zeros(shape=2, dtype=dtype)
... num[...] = na[::2]
... num2 = numpy.array([1,3], dtype=dtype)
... print dtype, num == num2, int(num.dtype == num2.dtype)
b [True True] 1
B [True True] 1
h [True True] 1
H [True True] 1
i [True True] 1
I [True True] 1
q [True True] 1
Q [True True] 1
f [True True] 1
d [True True] 1
F [True True] 1
D [True True] 1
Checking numarray-->numpy conversion. Non-strided values. Data copy.
>>> for dtype in dtypes:
... na = numarray.array([1,2,3], dtype=dtype)
... num = numpy.zeros(shape=2, dtype=dtype)
... num = numpy.array(na, copy=1)
... num2 = numpy.array([1,2,3], dtype=dtype)
... print dtype, num == num2, int(num.dtype == num2.dtype)
b [True True True] 1
B [True True True] 1
h [True True True] 1
H [True True True] 1
i [True True True] 1
I [True True True] 1
q [True True True] 1
Q [True True True] 1
f [True True True] 1
d [True True True] 1
F [True True True] 1
D [True True True] 1
Checking numarray-->numpy conversion. Non-strided values. No data copy.
>>> for dtype in dtypes:
... na = numarray.array([1,2,3], dtype=dtype)
... num = numpy.zeros(shape=2, dtype=dtype)
... num = numpy.array(na, copy=0)
... num2 = numpy.array([1,2,3], dtype=dtype)
... print dtype, num == num2, int(num.dtype == num2.dtype)
b [True True True] 1
B [True True True] 1
h [True True True] 1
H [True True True] 1
i [True True True] 1
I [True True True] 1
q [True True True] 1
Q [True True True] 1
f [True True True] 1
d [True True True] 1
F [True True True] 1
D [True True True] 1
Checking numarray-->numpy conversion. Strided values. No data copy.
>>> for dtype in dtypes:
... na = numarray.array([1,2,3], dtype=dtype)
... num = numpy.zeros(shape=2, dtype=dtype)
... num = numpy.array(na[::2], copy=0)
... num2 = numpy.array([1,3], dtype=dtype)
... print dtype, num == num2, int(num.dtype == num2.dtype)
b [True True] 1
B [True True] 1
h [True True] 1
H [True True] 1
i [True True] 1
I [True True] 1
q [True True] 1
Q [True True] 1
f [True True] 1
d [True True] 1
F [True True] 1
D [True True] 1
Checking numarray-->numpy conversion. Strided values. Data copy.
>>> for dtype in dtypes:
... na = numarray.array([1,2,3], dtype=dtype)
... num = numpy.zeros(shape=2, dtype=dtype)
... num = numpy.array(na[::2], copy=1)
... num2 = numpy.array([1,3], dtype=dtype)
... print dtype, num == num2, int(num.dtype == num2.dtype)
b [True True] 1
B [True True] 1
h [True True] 1
H [True True] 1
i [True True] 1
I [True True] 1
q [True True] 1
Q [True True] 1
f [True True] 1
d [True True] 1
F [True True] 1
D [True True] 1
Checking numarray-->numpy conversion. Offset values. Data copy.
>>> for dtype in dtypes:
... na = numarray.array([1,2,3], dtype=dtype)
... num = numpy.zeros(shape=2, dtype=dtype)
... num = numpy.array(na[1:], copy=1)
... num2 = numpy.array([2,3], dtype=dtype)
... print dtype, num == num2, int(num.dtype == num2.dtype)
b [True True] 1
B [True True] 1
h [True True] 1
H [True True] 1
i [True True] 1
I [True True] 1
q [True True] 1
Q [True True] 1
f [True True] 1
d [True True] 1
F [True True] 1
D [True True] 1
Checking numarray-->numpy conversion. Offset values. No data copy.
>>> for dtype in dtypes:
... na = numarray.array([1,2,3], dtype=dtype)
... num = numpy.zeros(shape=2, dtype=dtype)
... num = numpy.array(na[1:], copy=0)
... num2 = numpy.array([2,3], dtype=dtype)
... print dtype, num == num2, int(num.dtype == num2.dtype)
b [True True] 1
B [True True] 1
h [True True] 1
H [True True] 1
i [True True] 1
I [True True] 1
q [True True] 1
Q [True True] 1
f [True True] 1
d [True True] 1
F [True True] 1
D [True True] 1
Checking numarray<--numpy assignment. Non-strided values. Data copy.
>>> for dtype in dtypes:
... num = numpy.array([1,2,3], dtype=dtype)
... na = numarray.zeros(shape=3, dtype=dtype)
... na[...] = num
... nb = numarray.array([1,2,3], dtype=dtype)
... print dtype, na == nb, int(na.type() == nb.type())
b [1 1 1] 1
B [1 1 1] 1
h [1 1 1] 1
H [1 1 1] 1
i [1 1 1] 1
I [1 1 1] 1
q [1 1 1] 1
Q [1 1 1] 1
f [1 1 1] 1
d [1 1 1] 1
F [1 1 1] 1
D [1 1 1] 1
Checking numarray<--numpy assignment. Strided values. Data copy.
>>> for dtype in dtypes:
... num = numpy.array([1,2,3], dtype=dtype)
... na = numarray.zeros(shape=2, dtype=dtype)
... na[...] = num[::2]
... nb = numarray.array([1,3], dtype=dtype)
... print dtype, na == nb, int(na.type() == nb.type())
b [1 1] 1
B [1 1] 1
h [1 1] 1
H [1 1] 1
i [1 1] 1
I [1 1] 1
q [1 1] 1
Q [1 1] 1
f [1 1] 1
d [1 1] 1
F [1 1] 1
D [1 1] 1
Checking numpy-->numarray conversion. Non-strided values. Data copy.
>>> for dtype in dtypes:
... num = numpy.array([1,2,3], dtype=dtype)
... na = numarray.zeros(shape=2, dtype=dtype)
... na = numarray.array(num, copy=1)
... nb = numarray.array([1,2,3], dtype=dtype)
... print dtype, na == nb, int(na.type() == nb.type())
b [1 1 1] 1
B [1 1 1] 1
h [1 1 1] 1
H [1 1 1] 1
i [1 1 1] 1
I [1 1 1] 1
q [1 1 1] 1
Q [1 1 1] 1
f [1 1 1] 1
d [1 1 1] 1
F [1 1 1] 1
D [1 1 1] 1
Checking numpy-->numarray conversion. Non-strided values. No data copy.
>>> for dtype in dtypes:
... num = numpy.array([1,2,3], dtype=dtype)
... na = numarray.zeros(shape=2, dtype=dtype)
... na = numarray.array(num, copy=0)
... nb = numarray.array([1,2,3], dtype=dtype)
... print dtype, na == nb, int(na.type() == nb.type())
b [1 1 1] 1
B [1 1 1] 1
h [1 1 1] 1
H [1 1 1] 1
i [1 1 1] 1
I [1 1 1] 1
q [1 1 1] 1
Q [1 1 1] 1
f [1 1 1] 1
d [1 1 1] 1
F [1 1 1] 1
D [1 1 1] 1
Checking numpy-->numarray conversion. Strided values. Data copy.
>>> for dtype in dtypes:
... num = numpy.array([1,2,3], dtype=dtype)
... na = numarray.zeros(shape=2, dtype=dtype)
... na = numarray.array(num[::2], copy=1)
... nb = numarray.array([1,3], dtype=dtype)
... print dtype, na == nb, int(na.type() == nb.type())
b [1 1] 1
B [1 1] 1
h [1 1] 1
H [1 1] 1
i [1 1] 1
I [1 1] 1
q [1 1] 1
Q [1 1] 1
f [1 1] 1
d [1 1] 1
F [1 1] 1
D [1 1] 1
Checking numpy-->numarray conversion. Strided values. No data copy.
>>> for dtype in dtypes:
... num = numpy.array([1,2,3], dtype=dtype)
... na = numarray.zeros(shape=2, dtype=dtype)
... na = numarray.array(num[::2], copy=0)
... nb = numarray.array([1,3], dtype=dtype)
... print dtype, na == nb, int(na.type() == nb.type())
b [1 1] 1
B [1 1] 1
h [1 1] 1
H [1 1] 1
i [1 1] 1
I [1 1] 1
q [1 1] 1
Q [1 1] 1
f [1 1] 1
d [1 1] 1
F [1 1] 1
D [1 1] 1
Checking numpy-->numarray conversion. Offset values. Data copy.
>>> for dtype in dtypes:
... num = numpy.array([1,2,3], dtype=dtype)
... na = numarray.zeros(shape=2, dtype=dtype)
... na = numarray.array(num[1:], copy=1)
... nb = numarray.array([2,3], dtype=dtype)
... print dtype, na == nb, int(na.type() == nb.type())
b [1 1] 1
B [1 1] 1
h [1 1] 1
H [1 1] 1
i [1 1] 1
I [1 1] 1
q [1 1] 1
Q [1 1] 1
f [1 1] 1
d [1 1] 1
F [1 1] 1
D [1 1] 1
Checking numpy-->numarray conversion. Offset values. No data copy.
>>> for dtype in dtypes:
... num = numpy.array([1,2,3], dtype=dtype)
... na = numarray.zeros(shape=2, dtype=dtype)
... na = numarray.array(num[1:], copy=0)
... nb = numarray.array([2,3], dtype=dtype)
... print dtype, na == nb, int(na.type() == nb.type())
b [1 1] 1
B [1 1] 1
h [1 1] 1
H [1 1] 1
i [1 1] 1
I [1 1] 1
q [1 1] 1
Q [1 1] 1
f [1 1] 1
d [1 1] 1
F [1 1] 1
D [1 1] 1
"""
def test():
import doctest, array_protocol
return doctest.testmod(array_protocol)
if __name__ == "__main__":
test()
| 25.020833
| 77
| 0.579814
| 3,154
| 16,814
| 3.085606
| 0.031072
| 0.120838
| 0.114057
| 0.062474
| 0.930847
| 0.920058
| 0.918825
| 0.912557
| 0.900843
| 0.896732
| 0
| 0.092493
| 0.22517
| 16,814
| 671
| 78
| 25.058122
| 0.654513
| 0.973534
| 0
| 0.571429
| 0
| 0
| 0.019277
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.238095
| true
| 0.095238
| 0.333333
| 0
| 0.619048
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 11
|
7be43614e727c60b8bf8d666cd90d2e72641ecd9
| 68,639
|
py
|
Python
|
benchmarks/SimResults/combinations_spec_locality/cmp_bwavesgccmcfleslie3d/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/combinations_spec_locality/cmp_bwavesgccmcfleslie3d/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/combinations_spec_locality/cmp_bwavesgccmcfleslie3d/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
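# Auto-generated per-component power/area report (presumably McPAT output;
# units are assumed to be mm^2 for 'Area' and W for the Dynamic and Leakage
# entries). 'Core' maps to a list with one dict per simulated core.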
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 4.72345e-06,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202693,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 2.02403e-05,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.347313,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.601421,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.344932,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.29367,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.343302,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.54895,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 3.82383e-06,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0125904,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0910465,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0931135,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0910503,
'Execution Unit/Register Files/Runtime Dynamic': 0.105704,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.220007,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.565341,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 2.57278,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00392745,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00392745,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00343906,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.0013413,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00133758,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0126315,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0370038,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0895124,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.69376,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.337064,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.304024,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.19283,
'Instruction Fetch Unit/Runtime Dynamic': 0.780237,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0690669,
'L2/Runtime Dynamic': 0.0155266,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.94674,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.32432,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0876627,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0876628,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.36239,
'Load Store Unit/Runtime Dynamic': 1.84431,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.216161,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.432323,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0767164,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0774717,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.354017,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0560921,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.645143,
'Memory Management Unit/Runtime Dynamic': 0.133564,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 23.3801,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 1.30032e-05,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0177598,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.179809,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.197582,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 5.54399,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0498229,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.241822,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.266868,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.114592,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.184833,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0932975,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.392723,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0901452,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.47063,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0504171,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00480652,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.053499,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0355471,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.103916,
'Execution Unit/Register Files/Runtime Dynamic': 0.0403536,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.125166,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.311805,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.39208,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000322766,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000322766,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000296288,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000122988,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000510637,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00145246,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00255307,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0341724,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.17365,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0780257,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.116065,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.49766,
'Instruction Fetch Unit/Runtime Dynamic': 0.232268,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0460378,
'L2/Runtime Dynamic': 0.00374756,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.60591,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.661816,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0442836,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0442837,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.81503,
'Load Store Unit/Runtime Dynamic': 0.924492,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.109196,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.218392,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.038754,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0394436,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.13515,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0127968,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.357831,
'Memory Management Unit/Runtime Dynamic': 0.0522404,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 15.7767,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.132625,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0067841,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0562553,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.195664,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.80049,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0910043,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.146787,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0740929,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.311884,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.104084,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.02642,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00381713,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.027603,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.02823,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.027603,
'Execution Unit/Register Files/Runtime Dynamic': 0.0320472,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0581517,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.169518,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.12151,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000479496,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000479496,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000421528,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000165306,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000405526,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00178605,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00445847,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0271382,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.72622,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0530833,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0921737,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.02851,
'Instruction Fetch Unit/Runtime Dynamic': 0.17864,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.033737,
'L2/Runtime Dynamic': 0.00795641,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.29632,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.520584,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0342676,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0342675,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.45814,
'Load Store Unit/Runtime Dynamic': 0.723847,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0844981,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.168996,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0299887,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0304948,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.10733,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00870376,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.314954,
'Memory Management Unit/Runtime Dynamic': 0.0391986,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.4512,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00410587,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0479361,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0520419,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.1232,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0358538,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.23085,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.185183,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.090258,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.145583,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0734853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.309326,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0748382,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.30031,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0349851,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00378582,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0411426,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0279985,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0761277,
'Execution Unit/Register Files/Runtime Dynamic': 0.0317843,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0956413,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.254774,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.23211,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000261061,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000261061,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000227814,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 8.84257e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000402201,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00115214,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00248766,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0269157,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.71207,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0737253,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0914177,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.01367,
'Instruction Fetch Unit/Runtime Dynamic': 0.195698,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0594532,
'L2/Runtime Dynamic': 0.016644,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.31416,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.545885,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0348449,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0348448,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.47871,
'Load Store Unit/Runtime Dynamic': 0.752572,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0859216,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.171843,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0304939,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0313747,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.10645,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0121218,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.314942,
'Memory Management Unit/Runtime Dynamic': 0.0434965,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.7566,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0920295,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00519217,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0446444,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.141866,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.38239,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 5.817101355307589,
'Runtime Dynamic': 5.817101355307589,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.298219,
'Runtime Dynamic': 0.0868154,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 68.6627,
'Peak Power': 101.775,
'Runtime Dynamic': 12.9369,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 68.3645,
'Total Cores/Runtime Dynamic': 12.8501,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.298219,
'Total L3s/Runtime Dynamic': 0.0868154,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
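The nested dict above appears to be a McPAT-style power/area report: each per-core dict flattens the component hierarchy into 'Unit/Sub-unit/Metric' path keys alongside bare top-level totals. A minimal sketch for regrouping one metric by component path (assuming `stats` is bound to the full dict printed above):

def metric_by_component(component_stats, metric='Runtime Dynamic'):
    # Keep every '<path>/<metric>' entry (all nesting levels); the bare
    # top-level metric key contains no '/' and is skipped, as it is the
    # component's own total rather than a sub-unit entry.
    suffix = '/' + metric
    return {key[:-len(suffix)]: value
            for key, value in component_stats.items()
            if key.endswith(suffix)}

# e.g. metric_by_component(stats['Core'][0]) maps 'Execution Unit',
# 'Load Store Unit', ... (and their sub-units) to runtime dynamic power.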
| 75.097374
| 124
| 0.682149
| 8,087
| 68,639
| 5.783851
| 0.067392
| 0.123487
| 0.112883
| 0.093385
| 0.938598
| 0.930731
| 0.917283
| 0.88763
| 0.861761
| 0.841899
| 0
| 0.132249
| 0.224231
| 68,639
| 914
| 125
| 75.097374
| 0.746169
| 0
| 0
| 0.642232
| 0
| 0
| 0.657124
| 0.048077
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7bf8052d64d7f0f297c1530c45850cc5b9a7c990
| 2,430
|
py
|
Python
|
tests/test_comparable.py
|
eblade/jsondb
|
6464f2a761e562477413bc4bb62c604f95a71a4e
|
[
"MIT"
] | 3
|
2016-11-22T23:16:56.000Z
|
2021-12-14T03:43:16.000Z
|
tests/test_comparable.py
|
eblade/jsondb
|
6464f2a761e562477413bc4bb62c604f95a71a4e
|
[
"MIT"
] | null | null | null |
tests/test_comparable.py
|
eblade/jsondb
|
6464f2a761e562477413bc4bb62c604f95a71a4e
|
[
"MIT"
] | null | null | null |
import pytest
from lindh.jsondb import Comparable


@pytest.mark.parametrize('a,b,expected', [
    ('a', 'b', True),
    ('b', 'a', False),
    ('a', 'a', False),
    ('a', None, False),
    (None, 'a', True),
    ('a', any, True),
    (any, 'a', False),
    (None, None, False),
    (any, any, False),
    (1, 1, False),
    (1, 2, True),
    (2, 1, False),
    (0, None, False),
    (None, 0, True),
    (any, 0, False),
    (0, any, True),
    (1, 'a', True),
    ('1', 'a', True),
    ('a', '1', False),
    ('a', 1, False),
])
def test_less_than(a, b, expected):
    a = Comparable(a)
    b = Comparable(b)
    assert (a < b) is expected


@pytest.mark.parametrize('a,b,expected', [
    ('a', 'b', False),
    ('b', 'a', True),
    ('a', 'a', False),
    ('a', None, True),
    (None, 'a', False),
    ('a', any, False),
    (any, 'a', True),
    (None, None, False),
    (any, any, False),
    (1, 1, False),
    (1, 2, False),
    (2, 1, True),
    (0, None, True),
    (None, 0, False),
    (any, 0, True),
    (0, any, False),
    (1, 'a', False),
    ('1', 'a', False),
    ('a', '1', True),
    ('a', 1, True),
])
def test_greater_than(a, b, expected):
    a = Comparable(a)
    b = Comparable(b)
    assert (a > b) is expected


@pytest.mark.parametrize('a,b,expected', [
    ('a', 'b', True),
    ('b', 'a', False),
    ('a', 'a', True),
    ('a', None, False),
    (None, 'a', True),
    ('a', any, True),
    (any, 'a', False),
    (None, None, True),
    (any, any, True),
    (1, 1, True),
    (1, 2, True),
    (2, 1, False),
    (0, None, False),
    (None, 0, True),
    (any, 0, False),
    (0, any, True),
    (1, 'a', True),
    ('1', 'a', True),
    ('a', '1', False),
    ('a', 1, False),
])
def test_less_or_equal(a, b, expected):
    a = Comparable(a)
    b = Comparable(b)
    assert (a <= b) is expected


@pytest.mark.parametrize('a,b,expected', [
    ('a', 'b', False),
    ('b', 'a', True),
    ('a', 'a', True),
    ('a', None, True),
    (None, 'a', False),
    ('a', any, False),
    (any, 'a', True),
    (None, None, True),
    (any, any, True),
    (1, 1, True),
    (1, 2, False),
    (2, 1, True),
    (0, None, True),
    (None, 0, False),
    (any, 0, True),
    (0, any, False),
    (1, 'a', False),
    ('1', 'a', False),
    ('a', '1', True),
    ('a', 1, True),
])
def test_greater_or_equal(a, b, expected):
    a = Comparable(a)
    b = Comparable(b)
    assert (a >= b) is expected
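Taken together, the four tables pin down one total order: None sorts below everything, the builtin any sentinel sorts above everything, numbers sort below strings, and equal values are neither less nor greater. A minimal sketch consistent with those expectations (an illustration only, not the actual lindh.jsondb implementation; the name Comparable is reused from the import above):

import functools

@functools.total_ordering
class Comparable:
    def __init__(self, value):
        self.value = value

    def _rank(self):
        # Rank buckets: None < numbers < strings < any. Tuple comparison
        # only reaches the second element within one bucket, so ints and
        # strings are never compared to each other directly.
        if self.value is None:
            return (0, 0)
        if self.value is any:
            return (3, 0)
        if isinstance(self.value, (int, float)):
            return (1, self.value)
        return (2, str(self.value))

    def __eq__(self, other):
        return self._rank() == other._rank()

    def __lt__(self, other):
        return self._rank() < other._rank()

functools.total_ordering derives <=, > and >= from __lt__ and __eq__, which is exactly the set of operators the four test functions exercise.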
| 21.130435
| 42
| 0.45144
| 345
| 2,430
| 3.150725
| 0.078261
| 0.036799
| 0.073597
| 0.080957
| 0.960442
| 0.943882
| 0.943882
| 0.943882
| 0.943882
| 0.943882
| 0
| 0.032333
| 0.287243
| 2,430
| 114
| 43
| 21.315789
| 0.595266
| 0
| 0
| 0.90566
| 0
| 0
| 0.046091
| 0
| 0
| 0
| 0
| 0
| 0.037736
| 1
| 0.037736
| false
| 0
| 0.018868
| 0
| 0.056604
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d0008580d32e505816bb206ef131a16aee00b262
| 62
|
py
|
Python
|
calc.py
|
syedshahab698/UnitTesting_In_Python
|
31c5eb5655da5d27a03360aa55154bea6aae8fc2
|
[
"MIT"
] | null | null | null |
calc.py
|
syedshahab698/UnitTesting_In_Python
|
31c5eb5655da5d27a03360aa55154bea6aae8fc2
|
[
"MIT"
] | null | null | null |
calc.py
|
syedshahab698/UnitTesting_In_Python
|
31c5eb5655da5d27a03360aa55154bea6aae8fc2
|
[
"MIT"
] | null | null | null |
def add_(a, b):
    """Return the sum of a and b."""
    return a + b


def sub_(a, b):
    """Return the difference of a and b."""
    return a - b
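A hypothetical companion test module for these helpers, in the unittest style the repository name suggests (the module name calc and the test values here are illustrative, not part of the snapshot above):

import unittest

from calc import add_, sub_


class TestCalc(unittest.TestCase):
    def test_add(self):
        self.assertEqual(add_(2, 3), 5)

    def test_sub(self):
        self.assertEqual(sub_(5, 3), 2)


if __name__ == '__main__':
    unittest.main()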
| 8.857143
| 14
| 0.548387
| 14
| 62
| 2.285714
| 0.428571
| 0.25
| 0.5
| 0.5625
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.290323
| 62
| 7
| 15
| 8.857143
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
d01bfa16bc9cac5ea5626167f50d8adcc2195157
| 84
|
py
|
Python
|
pyimagesource/__init__.py
|
Fhrozen/pyimagesource
|
0bd3c8de484694877ea1d152448b7bc0b31b279a
|
[
"Apache-2.0"
] | 6
|
2020-06-27T09:55:46.000Z
|
2022-03-28T01:01:37.000Z
|
pyimagesource/__init__.py
|
Fhrozen/ism_rir
|
0bd3c8de484694877ea1d152448b7bc0b31b279a
|
[
"Apache-2.0"
] | null | null | null |
pyimagesource/__init__.py
|
Fhrozen/ism_rir
|
0bd3c8de484694877ea1d152448b7bc0b31b279a
|
[
"Apache-2.0"
] | 1
|
2019-12-05T08:22:31.000Z
|
2019-12-05T08:22:31.000Z
|
from .bank import audiodata # NOQA
from .bank import Room_Impulse_Response # NOQA
| 28
| 47
| 0.785714
| 12
| 84
| 5.333333
| 0.666667
| 0.25
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 84
| 2
| 48
| 42
| 0.914286
| 0.107143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d0f0d2e22638265dcd8c0c4e277641f1fdc39bf6
| 97
|
py
|
Python
|
rouge_papier_v2/rouge_papier_v2/__init__.py
|
BambooPalace/text-summarization
|
17ac68598563492b5e8959493b2bf1b137f78a5a
|
[
"MIT"
] | 54
|
2019-09-20T12:31:10.000Z
|
2022-03-19T12:21:32.000Z
|
rouge_papier_v2/rouge_papier_v2/__init__.py
|
huaweicould-ei/ExtSummLongDoc
|
43da8584a1ec5df6ed31a844285a12b71eb2b4a8
|
[
"MIT"
] | 9
|
2019-11-25T06:17:11.000Z
|
2022-03-23T04:08:53.000Z
|
rouge_papier_v2/rouge_papier_v2/__init__.py
|
huaweicould-ei/ExtSummLongDoc
|
43da8584a1ec5df6ed31a844285a12b71eb2b4a8
|
[
"MIT"
] | 12
|
2019-12-08T10:06:05.000Z
|
2022-03-06T08:10:53.000Z
|
from .wrapper import compute_rouge
from .generate import compute_extract, compute_pairwise_ranks
| 32.333333
| 61
| 0.876289
| 13
| 97
| 6.230769
| 0.692308
| 0.320988
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092784
| 97
| 2
| 62
| 48.5
| 0.920455
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ef83ce44801ee6baefc4a26db40fb815a96a2a07
| 5,073
|
py
|
Python
|
srp/tests/test_recognition.py
|
sonerkcardak/python-detailed-assistant
|
161b82289c5ae7149fe638ba6a5192b6aa6833d8
|
[
"Apache-2.0"
] | null | null | null |
srp/tests/test_recognition.py
|
sonerkcardak/python-detailed-assistant
|
161b82289c5ae7149fe638ba6a5192b6aa6833d8
|
[
"Apache-2.0"
] | null | null | null |
srp/tests/test_recognition.py
|
sonerkcardak/python-detailed-assistant
|
161b82289c5ae7149fe638ba6a5192b6aa6833d8
|
[
"Apache-2.0"
] | 1
|
2020-02-16T14:25:42.000Z
|
2020-02-16T14:25:42.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import unittest

import speech_recognition as sr


class TestRecognition(unittest.TestCase):
    def setUp(self):
        self.AUDIO_FILE_EN = os.path.join(os.path.dirname(os.path.realpath(__file__)), "english.wav")
        self.AUDIO_FILE_FR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "french.aiff")
        self.AUDIO_FILE_ZH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "chinese.flac")

    def test_sphinx_english(self):
        r = sr.Recognizer()
        with sr.AudioFile(self.AUDIO_FILE_EN) as source: audio = r.record(source)
        self.assertEqual(r.recognize_sphinx(audio), "one two three")

    def test_google_english(self):
        r = sr.Recognizer()
        with sr.AudioFile(self.AUDIO_FILE_EN) as source: audio = r.record(source)
        self.assertIn(r.recognize_google(audio), ["1 2 3", "one two three"])

    def test_google_french(self):
        r = sr.Recognizer()
        with sr.AudioFile(self.AUDIO_FILE_FR) as source: audio = r.record(source)
        self.assertEqual(r.recognize_google(audio, language="fr-FR"), u"et c'est la dictée numéro 1")

    def test_google_chinese(self):
        r = sr.Recognizer()
        with sr.AudioFile(self.AUDIO_FILE_ZH) as source: audio = r.record(source)
        self.assertEqual(r.recognize_google(audio, language="zh-CN"), u"砸自己的脚")

    @unittest.skipUnless("WIT_AI_KEY" in os.environ, "requires Wit.ai key to be specified in WIT_AI_KEY environment variable")
    def test_wit_english(self):
        r = sr.Recognizer()
        with sr.AudioFile(self.AUDIO_FILE_EN) as source: audio = r.record(source)
        self.assertEqual(r.recognize_wit(audio, key=os.environ["WIT_AI_KEY"]), "one two three")

    @unittest.skipUnless("BING_KEY" in os.environ, "requires Microsoft Bing Voice Recognition key to be specified in BING_KEY environment variable")
    def test_bing_english(self):
        r = sr.Recognizer()
        with sr.AudioFile(self.AUDIO_FILE_EN) as source: audio = r.record(source)
        self.assertEqual(r.recognize_bing(audio, key=os.environ["BING_KEY"]), "123.")

    @unittest.skipUnless("BING_KEY" in os.environ, "requires Microsoft Bing Voice Recognition key to be specified in BING_KEY environment variable")
    def test_bing_french(self):
        r = sr.Recognizer()
        with sr.AudioFile(self.AUDIO_FILE_FR) as source: audio = r.record(source)
        self.assertEqual(r.recognize_bing(audio, key=os.environ["BING_KEY"], language="fr-FR"), u"Essaye la dictée numéro un.")

    @unittest.skipUnless("BING_KEY" in os.environ, "requires Microsoft Bing Voice Recognition key to be specified in BING_KEY environment variable")
    def test_bing_chinese(self):
        r = sr.Recognizer()
        with sr.AudioFile(self.AUDIO_FILE_ZH) as source: audio = r.record(source)
        self.assertEqual(r.recognize_bing(audio, key=os.environ["BING_KEY"], language="zh-CN"), u"砸自己的脚。")

    @unittest.skipUnless("HOUNDIFY_CLIENT_ID" in os.environ and "HOUNDIFY_CLIENT_KEY" in os.environ, "requires Houndify client ID and client key to be specified in HOUNDIFY_CLIENT_ID and HOUNDIFY_CLIENT_KEY environment variables")
    def test_houndify_english(self):
        r = sr.Recognizer()
        with sr.AudioFile(self.AUDIO_FILE_EN) as source: audio = r.record(source)
        self.assertEqual(r.recognize_houndify(audio, client_id=os.environ["HOUNDIFY_CLIENT_ID"], client_key=os.environ["HOUNDIFY_CLIENT_KEY"]), "one two three")

    @unittest.skipUnless("IBM_USERNAME" in os.environ and "IBM_PASSWORD" in os.environ, "requires IBM Speech to Text username and password to be specified in IBM_USERNAME and IBM_PASSWORD environment variables")
    def test_ibm_english(self):
        r = sr.Recognizer()
        with sr.AudioFile(self.AUDIO_FILE_EN) as source: audio = r.record(source)
        self.assertEqual(r.recognize_ibm(audio, username=os.environ["IBM_USERNAME"], password=os.environ["IBM_PASSWORD"]), "one two three ")

    @unittest.skipUnless("IBM_USERNAME" in os.environ and "IBM_PASSWORD" in os.environ, "requires IBM Speech to Text username and password to be specified in IBM_USERNAME and IBM_PASSWORD environment variables")
    def test_ibm_french(self):
        r = sr.Recognizer()
        with sr.AudioFile(self.AUDIO_FILE_FR) as source: audio = r.record(source)
        self.assertEqual(r.recognize_ibm(audio, username=os.environ["IBM_USERNAME"], password=os.environ["IBM_PASSWORD"], language="fr-FR"), u"si la dictée numéro un ")

    @unittest.skipUnless("IBM_USERNAME" in os.environ and "IBM_PASSWORD" in os.environ, "requires IBM Speech to Text username and password to be specified in IBM_USERNAME and IBM_PASSWORD environment variables")
    def test_ibm_chinese(self):
        r = sr.Recognizer()
        with sr.AudioFile(self.AUDIO_FILE_ZH) as source: audio = r.record(source)
        self.assertEqual(r.recognize_ibm(audio, username=os.environ["IBM_USERNAME"], password=os.environ["IBM_PASSWORD"], language="zh-CN"), u"砸 自己 的 脚 ")


if __name__ == "__main__":
    unittest.main()
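Most of the cases above skip themselves unless the matching API keys are exported, so only the offline Sphinx test runs out of the box. A hedged way to invoke just that case (assumes pocketsphinx is installed and the srp package from the path above is importable):

import unittest

suite = unittest.defaultTestLoader.loadTestsFromName(
    "srp.tests.test_recognition.TestRecognition.test_sphinx_english")
unittest.TextTestRunner(verbosity=2).run(suite)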
| 58.310345
| 230
| 0.718707
| 746
| 5,073
| 4.707775
| 0.131367
| 0.061503
| 0.055524
| 0.058087
| 0.826879
| 0.795843
| 0.768223
| 0.752278
| 0.752278
| 0.752278
| 0
| 0.00212
| 0.163217
| 5,073
| 86
| 231
| 58.988372
| 0.825206
| 0.008476
| 0
| 0.454545
| 0
| 0
| 0.272076
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.19697
| false
| 0.090909
| 0.045455
| 0
| 0.257576
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
ef8cb6fa8dec8a0c2ae22171308c8cb2450e41f0
| 46
|
py
|
Python
|
tests/__main__.py
|
H4CKY54CK/misctools
|
e6f1f944046f07b808d19bb4e4c8fae6264eb428
|
[
"MIT"
] | 3
|
2020-08-23T21:18:09.000Z
|
2021-12-08T15:48:38.000Z
|
tests/__main__.py
|
H4CKY54CK/misctools
|
e6f1f944046f07b808d19bb4e4c8fae6264eb428
|
[
"MIT"
] | 2
|
2020-04-14T09:18:54.000Z
|
2020-07-13T06:09:22.000Z
|
tests/__main__.py
|
H4CKY54CK/misctools
|
e6f1f944046f07b808d19bb4e4c8fae6264eb428
|
[
"MIT"
] | null | null | null |
from . import test_archit
test_archit.test_()
| 15.333333
| 25
| 0.804348
| 7
| 46
| 4.857143
| 0.571429
| 0.588235
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108696
| 46
| 2
| 26
| 23
| 0.829268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
efb1f61c27e19d3f317a85f3e1e01141c409ce5a
| 5,042
|
py
|
Python
|
src/models/base_models/resnet.py
|
Charl-AI/lightning-template
|
1cd06d34f5294171768c9078c67eb34a180ce48b
|
[
"Apache-2.0"
] | null | null | null |
src/models/base_models/resnet.py
|
Charl-AI/lightning-template
|
1cd06d34f5294171768c9078c67eb34a180ce48b
|
[
"Apache-2.0"
] | 10
|
2021-09-19T16:07:03.000Z
|
2022-02-13T11:36:19.000Z
|
src/models/base_models/resnet.py
|
Charl-AI/lightning-template
|
1cd06d34f5294171768c9078c67eb34a180ce48b
|
[
"Apache-2.0"
] | null | null | null |
"""Standard torchvision implementations at:
https://pytorch.org/vision/0.8/_modules/torchvision/models/resnet.html"""
import torch
from torchvision.models.resnet import ResNet, BasicBlock, Bottleneck
class ResNet18(ResNet):
"""ResNet 18, based on torchvision implementation [BSD 3-Clause License].
Modified to allow for different numbers of input channels (e.g grayscale).
If you want a pretrained model, use the official torchvision implementation
(pretrained models only exist for 3-channel inputs, so the channel
modifications made here would be useless anyway).
Input to forward method: Image Tensor, size [Bx in_channels xHxW]
Output of forward method: Predictions Tensor, size [Bx num_classes]
"""
def __init__(self, in_channels: int = 3, out_classes: int = 10):
super().__init__(BasicBlock, [2, 2, 2, 2], num_classes=out_classes)
# simply change the first layer to accept the number of input channels
self.conv1 = torch.nn.Conv2d(
in_channels,
64,
kernel_size=(7, 7),
stride=(2, 2),
padding=(3, 3),
bias=False,
)
class ResNet34(ResNet):
"""ResNet 34, based on torchvision implementation [BSD 3-Clause License].
Modified to allow for different numbers of input channels (e.g grayscale).
If you want a pretrained model, use the official torchvision implementation
(pretrained models only exist for 3-channel inputs, so the channel
modifications made here would be useless anyway).
Input to forward method: Image Tensor, size [Bx in_channels xHxW]
Output of forward method: Predictions Tensor, size [Bx num_classes]
"""
def __init__(self, in_channels: int = 3, out_classes: int = 10):
super().__init__(BasicBlock, [3, 4, 6, 3], num_classes=out_classes)
# simply change the first layer to accept the number of input channels
self.conv1 = torch.nn.Conv2d(
in_channels,
64,
kernel_size=(7, 7),
stride=(2, 2),
padding=(3, 3),
bias=False,
)
class ResNet50(ResNet):
"""ResNet 50, based on torchvision implementation [BSD 3-Clause License].
Modified to allow for different numbers of input channels (e.g grayscale).
If you want a pretrained model, use the official torchvision implementation
(pretrained models only exist for 3-channel inputs, so the channel
modifications made here would be useless anyway).
Input to forward method: Image Tensor, size [Bx in_channels xHxW]
Output of forward method: Predictions Tensor, size [Bx num_classes]
"""
def __init__(self, in_channels: int = 3, out_classes: int = 10):
super().__init__(Bottleneck, [3, 4, 6, 3], num_classes=out_classes)
# simply change the first layer to accept the number of input channels
self.conv1 = torch.nn.Conv2d(
in_channels,
64,
kernel_size=(7, 7),
stride=(2, 2),
padding=(3, 3),
bias=False,
)
class ResNet101(ResNet):
"""ResNet 101, based on torchvision implementation [BSD 3-Clause License].
Modified to allow for different numbers of input channels (e.g grayscale).
If you want a pretrained model, use the official torchvision implementation
(pretrained models only exist for 3-channel inputs, so the channel
modifications made here would be useless anyway).
Input to forward method: Image Tensor, size [Bx in_channels xHxW]
Output of forward method: Predictions Tensor, size [Bx num_classes]
"""
def __init__(self, in_channels: int = 3, out_classes: int = 10):
super().__init__(Bottleneck, [3, 4, 23, 3], num_classes=out_classes)
# simply change the first layer to accept the number of input channels
self.conv1 = torch.nn.Conv2d(
in_channels,
64,
kernel_size=(7, 7),
stride=(2, 2),
padding=(3, 3),
bias=False,
)
class ResNet152(ResNet):
"""ResNet 152, based on torchvision implementation [BSD 3-Clause License].
Modified to allow for different numbers of input channels (e.g grayscale).
If you want a pretrained model, use the official torchvision implementation
(pretrained models only exist for 3-channel inputs, so the channel
modifications made here would be useless anyway).
Input to forward method: Image Tensor, size [Bx in_channels xHxW]
Output of forward method: Predictions Tensor, size [Bx num_classes]
"""
def __init__(self, in_channels: int = 3, out_classes: int = 10):
super().__init__(Bottleneck, [3, 8, 36, 3], num_classes=out_classes)
# simply change the first layer to accept the number of input channels
self.conv1 = torch.nn.Conv2d(
in_channels,
64,
kernel_size=(7, 7),
stride=(2, 2),
padding=(3, 3),
bias=False,
)
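A quick smoke test of the channel modification the docstrings describe (a sketch, assuming torch and torchvision are installed and the classes above are in scope):

import torch

model = ResNet18(in_channels=1, out_classes=10)  # e.g. grayscale input
x = torch.randn(4, 1, 224, 224)                  # [Bx in_channels xHxW]
logits = model(x)
assert logits.shape == (4, 10)                   # [Bx num_classes]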
| 35.758865
| 79
| 0.656486
| 670
| 5,042
| 4.819403
| 0.164179
| 0.046454
| 0.046454
| 0.049551
| 0.909879
| 0.909879
| 0.909879
| 0.909879
| 0.909879
| 0.909879
| 0
| 0.032949
| 0.259619
| 5,042
| 140
| 80
| 36.014286
| 0.832039
| 0.562079
| 0
| 0.701754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087719
| false
| 0
| 0.035088
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
efc520cb6fbec8f8ec0adce302eca6b115ac65a6
| 35,914
|
py
|
Python
|
diff_representation/model/autoencoder.py
|
microsoft/iclr2019-learning-to-represent-edits
|
e5777d6aa6cdeda500cf076646177c48d1cb4622
|
[
"MIT"
] | 8
|
2021-03-15T18:57:18.000Z
|
2021-08-23T11:28:22.000Z
|
diff_representation/model/autoencoder.py
|
microsoft/iclr2019-learning-to-represent-edits
|
e5777d6aa6cdeda500cf076646177c48d1cb4622
|
[
"MIT"
] | null | null | null |
diff_representation/model/autoencoder.py
|
microsoft/iclr2019-learning-to-represent-edits
|
e5777d6aa6cdeda500cf076646177c48d1cb4622
|
[
"MIT"
] | 4
|
2021-03-27T14:19:09.000Z
|
2021-09-13T12:35:31.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import string

from diff_representation.model import utils
from diff_representation.model.bag_of_edits_change_encoder import BagOfEditsChangeEncoder
from diff_representation.model.graph_change_encoder import GraphChangeEncoder
from diff_representation.model.graph_code_encoder import GraphCodeEncoder
from diff_representation.model.hybrid_change_encoder import HybridChangeEncoder
from diff_representation.model.sequential_change_encoder import SequentialChangeEncoder
from diff_representation.model.transition_decoder import TransitionDecoder, TransitionDecoderWithGraphEncoder

from .embedder import CodeTokenEmbedder, SyntaxTreeEmbedder, EmbeddingTable, ConvolutionalCharacterEmbedder
from .encoder import *
from .sequential_decoder import *


class SequentialAutoEncoder(nn.Module):
    def __init__(self,
                 token_embed_size, token_encoding_size, change_vector_size, change_tag_embed_size,
                 decoder_hidden_size, decoder_dropout, init_decode_vec_encoder_state_dropout,
                 vocab,
                 no_change_vector=False,
                 no_unchanged_token_encoding_in_diff_seq=False,
                 no_copy=False,
                 change_encoder_type='word',
                 token_embedder='word'):
        self.args = utils.get_method_args_dict(self.__init__, locals())
        super(SequentialAutoEncoder, self).__init__()

        if token_embedder == 'word':
            self.syntax_token_embedder = CodeTokenEmbedder(token_embed_size, vocab)
        elif token_embedder == 'char':
            self.syntax_token_embedder = ConvolutionalCharacterEmbedder(token_embed_size, max_character_size=20)

        self.sequential_code_encoder = SequentialCodeEncoder(token_embed_size, token_encoding_size,
                                                             code_token_embedder=self.syntax_token_embedder,
                                                             vocab=vocab)

        if change_encoder_type == 'word':
            self.code_change_encoder = SequentialChangeEncoder(token_encoding_size, change_vector_size,
                                                               change_tag_embed_size,
                                                               vocab,
                                                               no_unchanged_token_encoding_in_diff_seq=no_unchanged_token_encoding_in_diff_seq)
        elif change_encoder_type == 'bag':
            self.code_change_encoder = BagOfEditsChangeEncoder(self.syntax_token_embedder.weight,
                                                               vocab)

        self.decoder = SequentialDecoder(token_embed_size, token_encoding_size, change_vector_size, decoder_hidden_size,
                                         dropout=decoder_dropout,
                                         init_decode_vec_encoder_state_dropout=init_decode_vec_encoder_state_dropout,
                                         code_token_embedder=self.syntax_token_embedder,
                                         vocab=vocab,
                                         no_copy=no_copy)

        self.vocab = vocab

    @property
    def device(self):
        return self.code_change_encoder.device

    def forward(self, examples, return_change_vectors=False):
        previous_code_chunk_list = [e.previous_code_chunk for e in examples]
        updated_code_chunk_list = [e.updated_code_chunk for e in examples]
        context_list = [e.context for e in examples]

        embedding_cache = EmbeddingTable(
            chain.from_iterable(previous_code_chunk_list + updated_code_chunk_list + context_list))
        self.syntax_token_embedder.populate_embedding_table(embedding_cache)

        batched_prev_code = self.sequential_code_encoder.encode(previous_code_chunk_list,
                                                                embedding_cache=embedding_cache)
        batched_updated_code = self.sequential_code_encoder.encode(updated_code_chunk_list,
                                                                   embedding_cache=embedding_cache)
        batched_context = self.sequential_code_encoder.encode(context_list, embedding_cache=embedding_cache)

        if self.args['no_change_vector'] is False:
            change_vectors = self.code_change_encoder(examples, batched_prev_code, batched_updated_code)
        else:
            change_vectors = torch.zeros(batched_updated_code.batch_size, self.args['change_vector_size'], device=self.device)

        scores = self.decoder(examples, batched_prev_code, batched_context, change_vectors, embedding_cache=embedding_cache)

        if return_change_vectors:
            return scores, change_vectors
        else:
            return scores

    def decode_updated_code(self, example, with_change_vec=False, change_vec=None, beam_size=5, debug=False):
        previous_code_chunk_list = [example.previous_code_chunk]
        updated_code_chunk_list = [example.updated_code_chunk]
        context_list = [example.context]

        embedding_cache = EmbeddingTable(
            chain.from_iterable(previous_code_chunk_list + updated_code_chunk_list + context_list))
        self.syntax_token_embedder.populate_embedding_table(embedding_cache)

        batched_prev_code = self.sequential_code_encoder.encode(previous_code_chunk_list,
                                                                embedding_cache=embedding_cache)
        batched_updated_code = self.sequential_code_encoder.encode(updated_code_chunk_list,
embedding_cache=embedding_cache)
batched_context = self.sequential_code_encoder.encode(context_list, embedding_cache=embedding_cache)
if change_vec is not None:
change_vectors = torch.from_numpy(change_vec).to(self.device)
if len(change_vectors.size()) == 1:
change_vectors = change_vectors.unsqueeze(0)
elif with_change_vec:
change_vectors = self.code_change_encoder([example], batched_prev_code, batched_updated_code)
else:
change_vectors = torch.zeros(batched_updated_code.batch_size, self.args['change_vector_size'],
device=self.device)
hypotheses = self.decoder.beam_search_with_source_encodings(example.previous_code_chunk, batched_prev_code,
example.context, batched_context,
change_vectors,
beam_size=beam_size, max_decoding_time_step=70,
debug=debug)
return hypotheses
def save(self, model_path):
params = {
'args': self.args,
'vocab': self.vocab,
'state_dict': self.state_dict()
}
torch.save(params, model_path)
@staticmethod
def load(model_path, use_cuda=True):
device = torch.device("cuda:0" if use_cuda else "cpu")
params = torch.load(model_path, map_location=lambda storage, loc: storage)
args = params['args']
model = SequentialAutoEncoder(vocab=params['vocab'], **args)
model.load_state_dict(params['state_dict'])
model = model.to(device)
return model
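# --- Illustrative round trip (hedged addition, not part of the original file) ---
# Assuming `model` is an already-constructed SequentialAutoEncoder, save()
# bundles the constructor args, vocab and weights so that load() can rebuild
# the model without any external configuration; the path is hypothetical:
#     model.save('/tmp/seq_autoencoder.bin')
#     restored = SequentialAutoEncoder.load('/tmp/seq_autoencoder.bin', use_cuda=False)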
class TreeBasedAutoEncoder(nn.Module):
def __init__(self,
token_embed_size, token_encoding_size, change_vector_size, change_tag_embed_size,
action_embed_size, field_embed_size,
decoder_hidden_size, decoder_dropout, init_decode_vec_encoder_state_dropout,
vocab,
grammar,
mode,
no_change_vector=False,
no_unchanged_token_encoding_in_diff_seq=False,
use_syntax_token_rnn=False,
token_embedder='word'):
self.args = utils.get_method_args_dict(self.__init__, locals())
super(TreeBasedAutoEncoder, self).__init__()
if token_embedder == 'word':
self.syntax_token_embedder = SyntaxTreeEmbedder(token_embed_size, vocab, grammar)
elif token_embedder == 'char':
self.syntax_token_embedder = ConvolutionalCharacterEmbedder(token_embed_size, max_character_size=20)
self.code_change_encoder = SequentialChangeEncoder(token_encoding_size, change_vector_size, change_tag_embed_size,
vocab,
no_unchanged_token_encoding_in_diff_seq=no_unchanged_token_encoding_in_diff_seq)
self.sequential_code_encoder = SequentialCodeEncoder(token_embed_size, token_encoding_size,
code_token_embedder=self.syntax_token_embedder,
vocab=vocab)
self.decoder = TransitionDecoder(token_encoding_size, change_vector_size, decoder_hidden_size,
action_embed_size, field_embed_size,
dropout=decoder_dropout,
init_decode_vec_encoder_state_dropout=init_decode_vec_encoder_state_dropout,
vocab=vocab,
grammar=grammar,
mode=mode,
use_syntax_token_rnn=use_syntax_token_rnn)
self.vocab = vocab
self.grammar = grammar
@property
def device(self):
return self.code_change_encoder.device
def forward(self, examples, return_change_vectors=False):
previous_code_chunk_list = [['<s>'] + e.previous_code_chunk for e in examples]
updated_code_chunk_list = [e.updated_code_chunk for e in examples]
context_list = [e.context for e in examples]
embedding_cache = EmbeddingTable(
chain.from_iterable(previous_code_chunk_list + updated_code_chunk_list + context_list))
self.syntax_token_embedder.populate_embedding_table(embedding_cache)
batched_prev_code = self.sequential_code_encoder.encode(previous_code_chunk_list,
embedding_cache=embedding_cache)
batched_updated_code = self.sequential_code_encoder.encode(updated_code_chunk_list,
embedding_cache=embedding_cache)
batched_context = self.sequential_code_encoder.encode(context_list, embedding_cache=embedding_cache)
if self.args['no_change_vector'] is False:
change_vectors = self.code_change_encoder(examples, batched_prev_code, batched_updated_code)
else:
change_vectors = torch.zeros(batched_updated_code.batch_size, self.args['change_vector_size'], device=self.device)
scores = self.decoder(examples, batched_prev_code, batched_context, change_vectors, embedding_cache=embedding_cache)
if return_change_vectors:
return scores, change_vectors
else:
return scores
def decode_updated_code(self, example, transition_system, with_change_vec=False, change_vec=None, beam_size=5, debug=False):
previous_code_chunk_list = [example.previous_code_chunk]
updated_code_chunk_list = [example.updated_code_chunk]
context_list = [example.context]
embedding_cache = EmbeddingTable(
chain.from_iterable(previous_code_chunk_list + updated_code_chunk_list + context_list))
self.syntax_token_embedder.populate_embedding_table(embedding_cache)
batched_prev_code = self.sequential_code_encoder.encode(previous_code_chunk_list,
embedding_cache=embedding_cache)
batched_updated_code = self.sequential_code_encoder.encode(updated_code_chunk_list,
embedding_cache=embedding_cache)
batched_context = self.sequential_code_encoder.encode(context_list, embedding_cache=embedding_cache)
if change_vec is not None:
change_vectors = torch.from_numpy(change_vec).to(self.device)
if len(change_vectors.size()) == 1:
change_vectors = change_vectors.unsqueeze(0)
elif with_change_vec:
change_vectors = self.code_change_encoder([example], batched_prev_code, batched_updated_code)
else:
change_vectors = torch.zeros(batched_updated_code.batch_size, self.args['change_vector_size'],
device=self.device)
hypotheses = self.decoder.beam_search_with_source_encodings(example.previous_code_chunk, batched_prev_code,
example.context, batched_context,
change_vectors,
beam_size=beam_size, max_decoding_time_step=70,
transition_system=transition_system, debug=debug)
return hypotheses
def save(self, model_path):
params = {
'args': self.args,
'vocab': self.vocab,
'grammar': self.grammar,
'state_dict': self.state_dict()
}
torch.save(params, model_path)
@staticmethod
def load(model_path, use_cuda=True):
device = torch.device("cuda:0" if use_cuda else "cpu")
params = torch.load(model_path, map_location=lambda storage, loc: storage)
args = params['args']
model = TreeBasedAutoEncoder(vocab=params['vocab'], grammar=params['grammar'], **args)
model.load_state_dict(params['state_dict'])
model = model.to(device)
return model
class TreeBasedAutoEncoderWithGraphEncoder(nn.Module):
def __init__(self,
token_embed_size, token_encoding_size, change_vector_size, change_tag_embed_size,
action_embed_size, field_embed_size,
decoder_hidden_size, decoder_dropout, init_decode_vec_encoder_state_dropout,
gnn_layer_timesteps, gnn_residual_connections, gnn_dropout,
vocab,
grammar,
mode,
no_change_vector=False,
no_unchanged_token_encoding_in_diff_seq=False,
use_syntax_token_rnn=False,
change_encoder_type='word',
token_embedder='word',
node_embed_method='type',
no_penalize_apply_tree_when_copy_subtree=False,
encode_change_vec_in_syntax_token_rnn=False,
feed_in_token_rnn_state_to_rule_rnn=False,
fuse_rule_and_token_rnns=False,
gnn_no_token_connection=False,
gnn_no_top_down_connection=False,
gnn_no_bottom_up_connection=False,
gnn_prev_sibling_connection=False,
gnn_next_sibling_connection=False,
copy_identifier=True,
decoder_init_method='avg_pooling',
gnn_use_bias_for_message_linear=True,
change_encoder_master_node_option=None,
no_copy=False):
self.args = utils.get_method_args_dict(self.__init__, locals())
super(TreeBasedAutoEncoderWithGraphEncoder, self).__init__()
self.syntax_tree_node_embedder = SyntaxTreeEmbedder(token_embed_size, vocab, grammar, node_embed_method=node_embed_method)
if token_embedder == 'word':
self.syntax_token_embedder = self.syntax_tree_node_embedder
elif token_embedder == 'char':
self.syntax_token_embedder = ConvolutionalCharacterEmbedder(token_embed_size, max_character_size=20)
self.sequential_code_encoder = SequentialCodeEncoder(token_embed_size, token_encoding_size,
code_token_embedder=self.syntax_token_embedder,
vocab=vocab)
if change_encoder_type == 'word':
self.code_change_encoder = SequentialChangeEncoder(token_encoding_size, change_vector_size, change_tag_embed_size,
vocab,
no_unchanged_token_encoding_in_diff_seq=no_unchanged_token_encoding_in_diff_seq)
elif change_encoder_type == 'graph':
self.code_change_encoder = GraphChangeEncoder(change_vector_size, syntax_tree_embedder=self.syntax_tree_node_embedder,
layer_time_steps=gnn_layer_timesteps,
dropout=gnn_dropout,
gnn_use_bias_for_message_linear=gnn_use_bias_for_message_linear,
master_node_option=change_encoder_master_node_option)
elif change_encoder_type == 'hybrid':
self.code_change_encoder = HybridChangeEncoder(token_encoding_size=token_encoding_size,
change_vector_dim=change_vector_size,
syntax_tree_embedder=self.syntax_tree_node_embedder,
layer_timesteps=gnn_layer_timesteps,
dropout=gnn_dropout,
vocab=vocab,
gnn_use_bias_for_message_linear=gnn_use_bias_for_message_linear)
elif change_encoder_type == 'bag':
self.code_change_encoder = BagOfEditsChangeEncoder(self.syntax_token_embedder.weight,
vocab)
else:
raise ValueError('unknown code change encoder type %s' % change_encoder_type)
self.prev_ast_encoder = GraphCodeEncoder(hidden_size=token_encoding_size,
syntax_tree_embedder=self.syntax_tree_node_embedder,
layer_timesteps=gnn_layer_timesteps, residual_connections=gnn_residual_connections, dropout=gnn_dropout,
vocab=vocab, grammar=grammar,
token_bidirectional_connection=not gnn_no_token_connection,
top_down_connection=not gnn_no_top_down_connection,
bottom_up_connection=not gnn_no_bottom_up_connection,
prev_sibling_connection=gnn_prev_sibling_connection,
next_sibling_connection=gnn_next_sibling_connection,
gnn_use_bias_for_message_linear=gnn_use_bias_for_message_linear)
if '2tree' in mode:
self.decoder = TransitionDecoderWithGraphEncoder(node_encoding_size=token_encoding_size,
change_vector_size=change_vector_size,
hidden_size=decoder_hidden_size,
action_embed_size=action_embed_size,
field_embed_size=field_embed_size,
dropout=decoder_dropout,
init_decode_vec_encoder_state_dropout=init_decode_vec_encoder_state_dropout,
vocab=vocab, grammar=grammar, mode=mode,
syntax_tree_embedder=self.syntax_tree_node_embedder,
use_syntax_token_rnn=use_syntax_token_rnn,
no_penalize_apply_tree_when_copy_subtree=no_penalize_apply_tree_when_copy_subtree,
encode_change_vec_in_syntax_token_rnn=encode_change_vec_in_syntax_token_rnn,
feed_in_token_rnn_state_to_rule_rnn=feed_in_token_rnn_state_to_rule_rnn,
fuse_rule_and_token_rnns=fuse_rule_and_token_rnns,
decoder_init_method=decoder_init_method,
copy_identifier=copy_identifier,
no_copy=no_copy)
else:
self.decoder = SequentialDecoderWithTreeEncoder(token_embed_size, token_encoding_size, change_vector_size,
decoder_hidden_size,
dropout=decoder_dropout,
init_decode_vec_encoder_state_dropout=init_decode_vec_encoder_state_dropout,
code_token_embedder=self.syntax_token_embedder,
vocab=vocab,
decoder_init_method=decoder_init_method)
self.vocab = vocab
self.grammar = grammar
@property
def device(self):
return self.code_change_encoder.device
def forward(self, examples, return_change_vectors=False, **kwargs):
previous_code_chunk_list = [e.previous_code_chunk for e in examples]
updated_code_chunk_list = [e.updated_code_chunk for e in examples]
context_list = [e.context for e in examples]
embedding_cache = EmbeddingTable(chain.from_iterable(previous_code_chunk_list + updated_code_chunk_list + context_list))
self.syntax_token_embedder.populate_embedding_table(embedding_cache)
batched_prev_code = self.sequential_code_encoder.encode(previous_code_chunk_list, embedding_cache=embedding_cache)
batched_updated_code = self.sequential_code_encoder.encode(updated_code_chunk_list, embedding_cache=embedding_cache)
batched_context = self.sequential_code_encoder.encode(context_list, embedding_cache=embedding_cache)
if self.args['no_change_vector'] is False:
change_vectors = self.code_change_encoder(examples, batched_prev_code, batched_updated_code)
else:
change_vectors = torch.zeros(batched_updated_code.batch_size, self.args['change_vector_size'],
device=self.device)
batched_prev_ast_node_encoding, \
batched_prev_ast_node_mask, \
batched_prev_ast_syntax_token_mask = self.prev_ast_encoder([e.prev_code_ast for e in examples], batched_prev_code.encoding)
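        # Ad-hoc record: type() builds a one-off class whose class attributes
        # carry the batched AST encodings, standing in for a named container.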
batched_prev_asts = type('BatchedDatum', (object,), {'encoding': batched_prev_ast_node_encoding,
'mask': batched_prev_ast_node_mask,
'syntax_token_mask': batched_prev_ast_syntax_token_mask})
results = self.decoder(examples, batched_prev_asts, batched_context, change_vectors, embedding_cache=embedding_cache, **kwargs)
if return_change_vectors:
return results, change_vectors
else:
return results
def decode_updated_code(self, example, transition_system, with_change_vec=False, change_vec=None, beam_size=5, debug=False):
previous_code_chunk_list = [example.previous_code_chunk]
updated_code_chunk_list = [example.updated_code_chunk]
context_list = [example.context]
embedding_cache = EmbeddingTable(
chain.from_iterable(previous_code_chunk_list + updated_code_chunk_list + context_list))
self.syntax_token_embedder.populate_embedding_table(embedding_cache)
batched_prev_code = self.sequential_code_encoder.encode(previous_code_chunk_list,
embedding_cache=embedding_cache)
batched_updated_code = self.sequential_code_encoder.encode(updated_code_chunk_list,
embedding_cache=embedding_cache)
batched_context = self.sequential_code_encoder.encode(context_list, embedding_cache=embedding_cache)
if change_vec is not None:
change_vectors = torch.from_numpy(change_vec).to(self.device)
if len(change_vectors.size()) == 1:
change_vectors = change_vectors.unsqueeze(0)
elif with_change_vec:
change_vectors = self.code_change_encoder([example], batched_prev_code, batched_updated_code)
else:
change_vectors = torch.zeros(batched_updated_code.batch_size, self.args['change_vector_size'],
device=self.device)
batched_prev_ast_node_encoding, \
batched_prev_ast_node_mask, \
batched_prev_ast_syntax_token_mask = self.prev_ast_encoder([example.prev_code_ast],
batched_prev_code.encoding)
batched_prev_asts = type('BatchedDatum', (object,), {'encoding': batched_prev_ast_node_encoding,
'mask': batched_prev_ast_node_mask,
'syntax_token_mask': batched_prev_ast_syntax_token_mask})
hypotheses = self.decoder.beam_search_with_source_encodings(example.prev_code_ast, batched_prev_asts,
example.context, batched_context,
change_vectors,
beam_size=beam_size, max_decoding_time_step=70,
transition_system=transition_system, debug=debug)
return hypotheses
def save(self, model_path):
params = {
'args': self.args,
'vocab': self.vocab,
'grammar': self.grammar,
'state_dict': self.state_dict()
}
torch.save(params, model_path)
@staticmethod
def load(model_path, use_cuda=True):
device = torch.device("cuda:0" if use_cuda else "cpu")
params = torch.load(model_path, map_location=lambda storage, loc: storage)
args = params['args']
model = TreeBasedAutoEncoderWithGraphEncoder(vocab=params['vocab'], grammar=params['grammar'], **args)
model.load_state_dict(params['state_dict'])
model = model.to(device)
return model
class Tree2SequenceAutoEncoder(nn.Module):
def __init__(self,
token_embed_size, token_encoding_size, change_vector_size, change_tag_embed_size,
action_embed_size, field_embed_size,
decoder_hidden_size, decoder_dropout, init_decode_vec_encoder_state_dropout,
gnn_layer_timesteps, gnn_residual_connections, gnn_dropout,
vocab,
grammar,
mode,
no_change_vector=False,
no_unchanged_token_encoding_in_diff_seq=False,
use_syntax_token_rnn=False,
token_embedder='word',
node_embed_method='type',
no_penalize_apply_tree_when_copy_subtree=False,
encode_change_vec_in_syntax_token_rnn=False,
feed_in_token_rnn_state_to_rule_rnn=False,
fuse_rule_and_token_rnns=False,
gnn_no_token_connection=False,
gnn_no_top_down_connection=False,
gnn_no_bottom_up_connection=False):
self.args = utils.get_method_args_dict(self.__init__, locals())
super(Tree2SequenceAutoEncoder, self).__init__()
self.syntax_tree_node_embedder = SyntaxTreeEmbedder(token_embed_size, vocab, grammar,
node_embed_method=node_embed_method)
if token_embedder == 'word':
self.syntax_token_embedder = self.syntax_tree_node_embedder
elif token_embedder == 'char':
self.syntax_token_embedder = ConvolutionalCharacterEmbedder(token_embed_size, max_character_size=20)
self.sequential_code_encoder = SequentialCodeEncoder(token_embed_size, token_encoding_size,
code_token_embedder=self.syntax_token_embedder,
vocab=vocab)
self.code_change_encoder = SequentialChangeEncoder(token_encoding_size, change_vector_size, change_tag_embed_size,
vocab,
no_unchanged_token_encoding_in_diff_seq=no_unchanged_token_encoding_in_diff_seq)
self.prev_ast_encoder = GraphCodeEncoder(hidden_size=token_encoding_size,
syntax_tree_embedder=self.syntax_tree_node_embedder,
layer_timesteps=gnn_layer_timesteps,
residual_connections=gnn_residual_connections, dropout=gnn_dropout,
vocab=vocab, grammar=grammar,
token_bidirectional_connection=not gnn_no_token_connection,
top_down_connection=not gnn_no_top_down_connection,
bottom_up_connection=not gnn_no_bottom_up_connection)
self.vocab = vocab
self.grammar = grammar
class WordPredictionMultiTask(nn.Module):
def __init__(self, change_vector_size, vocab, device):
super(WordPredictionMultiTask, self).__init__()
self.vocab = vocab
self.device = device
self.change_vec_to_vocab = nn.Linear(change_vector_size, len(vocab))
self.words_to_discard = {'VAR0', 'int', 'long', 'string', 'float', 'LITERAL', 'var'}
def forward(self, examples, change_vecs):
# change_vecs: (batch_size, change_vec_size)
# (batch_size, max_word_num)
tgt_word_ids, tgt_word_mask = self.get_word_ids_to_predict(examples)
# (batch_size, vocab_size)
log_probs = F.log_softmax(self.change_vec_to_vocab(change_vecs), dim=-1)
tgt_log_probs = torch.gather(log_probs, 1, tgt_word_ids)
tgt_log_probs = (tgt_log_probs * tgt_word_mask).sum(dim=-1)
        tgt_log_probs = tgt_log_probs / (tgt_word_mask.sum(dim=-1) + 1e-7)  # avoid division by zero when the mask is empty
return tgt_log_probs
def get_word_ids_to_predict(self, examples):
tgt_words = []
for example in examples:
example_tgt_words = []
example_tgt_words.extend(filter(lambda x: x not in self.words_to_discard and not all(c in string.punctuation for c in x), example.previous_code_chunk))
example_tgt_words.extend(filter(lambda x: x not in self.words_to_discard and not all(c in string.punctuation for c in x), example.updated_code_chunk))
tgt_words.append(example_tgt_words)
# if len(example_tgt_words) == 0:
# print(example.previous_code_chunk)
# print(example.updated_code_chunk)
max_word_num = max(len(x) for x in tgt_words)
tgt_word_ids = torch.zeros(len(examples), max_word_num, dtype=torch.long, device=self.device)
tgt_word_mask = torch.zeros(len(examples), max_word_num, dtype=torch.float, device=self.device)
for batch_id, example_words in enumerate(tgt_words):
            # torch.LongTensor does not accept a device keyword; build the tensor with torch.tensor instead
            tgt_word_ids[batch_id, :len(example_words)] = torch.tensor([self.vocab[word] for word in example_words],
                                                                       dtype=torch.long, device=self.device)
tgt_word_mask[batch_id, :len(example_words)] = 1
return tgt_word_ids, tgt_word_mask
class ChangedWordPredictionMultiTask(nn.Module):
def __init__(self, change_vector_size, vocab, device):
super(ChangedWordPredictionMultiTask, self).__init__()
self.vocab = vocab
self.device = device
self.change_vec_to_vocab = nn.Linear(change_vector_size, len(vocab) * 2)
self.offset = len(vocab)
self.words_to_discard = {'VAR', 'LITERAL', 'var'} # 'int', 'long', 'string', 'float',
def forward(self, examples, change_vecs):
# change_vecs: (batch_size, change_vec_size)
# (batch_size, max_word_num)
tgt_word_ids, tgt_word_mask = self.get_word_ids_to_predict(examples)
if len(tgt_word_ids.size()) == 1:
return None
# (batch_size, vocab_size)
log_probs = F.log_softmax(self.change_vec_to_vocab(change_vecs), dim=-1)
tgt_log_probs = torch.gather(log_probs, 1, tgt_word_ids)
tgt_log_probs = (tgt_log_probs * tgt_word_mask).sum(dim=-1)
        tgt_log_probs = tgt_log_probs / (tgt_word_mask.sum(dim=-1) + 1e-7)  # avoid division by zero when the mask is empty
return tgt_log_probs
def get_changed_words_from_change_seq(self, change_seq):
add_del_words = []
for entry in change_seq:
tag, token = entry
if tag == 'ADD':
add_del_words.append(('ADD', token))
elif tag == 'DEL':
add_del_words.append(('DEL', token))
elif tag == 'REPLACE':
add_del_words.append(('DEL', token[0]))
add_del_words.append(('ADD', token[1]))
add_del_words = list(filter(lambda t: t[1] not in self.words_to_discard and \
not t[1].startswith('VAR') and \
not all(c in string.punctuation for c in t[1]), add_del_words))
return add_del_words
def get_word_ids_to_predict(self, examples):
tgt_words = []
for example in examples:
example_tgt_words = self.get_changed_words_from_change_seq(example.change_seq)
tgt_words.append(example_tgt_words)
max_word_num = max(len(x) for x in tgt_words)
tgt_word_ids = torch.zeros(len(examples), max_word_num, dtype=torch.long, device=self.device)
tgt_word_mask = torch.zeros(len(examples), max_word_num, dtype=torch.float, device=self.device)
for batch_id, example_words in enumerate(tgt_words):
if len(example_words) > 0:
                # torch.LongTensor does not accept a device keyword; build the tensor with torch.tensor instead
                tgt_word_ids[batch_id, :len(example_words)] = torch.tensor(
                    [self.vocab[word] if tag == 'ADD' else (self.offset + self.vocab[word])
                     for tag, word in example_words],
                    dtype=torch.long, device=self.device)
tgt_word_mask[batch_id, :len(example_words)] = 1
return tgt_word_ids, tgt_word_mask
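# --- Illustrative multi-task wiring (hedged addition, not part of the original file) ---
# A sketch of how the auxiliary word-prediction objective above could be
# combined with the autoencoder loss; `model`, `examples`, the change-vector
# size of 512 and the weight `alpha` are all hypothetical:
#     aux_task = ChangedWordPredictionMultiTask(change_vector_size=512, vocab=model.vocab, device=model.device)
#     scores, change_vecs = model(examples, return_change_vectors=True)
#     aux_log_probs = aux_task(examples, change_vecs)
#     loss = -(scores.sum() + alpha * aux_log_probs.sum())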
| 55.252308
| 163
| 0.597761
| 3,727
| 35,914
| 5.299973
| 0.066005
| 0.024604
| 0.023693
| 0.025616
| 0.884018
| 0.871209
| 0.847314
| 0.834506
| 0.818559
| 0.810155
| 0
| 0.002238
| 0.340675
| 35,914
| 649
| 164
| 55.337442
| 0.831961
| 0.012251
| 0
| 0.752437
| 0
| 0
| 0.017654
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050682
| false
| 0
| 0.021443
| 0.005848
| 0.124756
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
56014d945daf9e39fde78db46599ec47b0a92684
| 5,848
|
py
|
Python
|
rdmo/questions/migrations/0039_meta.py
|
Raspeanut/rdmo
|
9f785010a499c372a2f8368ccf76d2ea4150adcb
|
[
"Apache-2.0"
] | 77
|
2016-08-09T11:40:20.000Z
|
2022-03-06T11:03:26.000Z
|
rdmo/questions/migrations/0039_meta.py
|
Raspeanut/rdmo
|
9f785010a499c372a2f8368ccf76d2ea4150adcb
|
[
"Apache-2.0"
] | 377
|
2016-07-01T13:59:36.000Z
|
2022-03-30T13:53:19.000Z
|
rdmo/questions/migrations/0039_meta.py
|
Raspeanut/rdmo
|
9f785010a499c372a2f8368ccf76d2ea4150adcb
|
[
"Apache-2.0"
] | 47
|
2016-06-23T11:32:19.000Z
|
2022-03-01T11:34:37.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-30 14:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('questions', '0038_rename_de_to_lang2'),
]
operations = [
migrations.AlterField(
model_name='catalog',
name='title_lang1',
field=models.CharField(help_text='The title for this catalog in the primary language.', max_length=256, verbose_name='Title (primary)'),
),
migrations.AlterField(
model_name='catalog',
name='title_lang2',
field=models.CharField(help_text='The title for this catalog in the secondary language.', max_length=256, verbose_name='Title (secondary)'),
),
migrations.AlterField(
model_name='question',
name='help_lang1',
field=models.TextField(blank=True, help_text='The help text for this question in the primary language.', null=True, verbose_name='Help (primary)'),
),
migrations.AlterField(
model_name='question',
name='help_lang2',
field=models.TextField(blank=True, help_text='The help text for this question in the secondary language.', null=True, verbose_name='Help (secondary)'),
),
migrations.AlterField(
model_name='question',
name='text_lang1',
field=models.TextField(help_text='The text for this question in the primary language.', verbose_name='Text (primary)'),
),
migrations.AlterField(
model_name='question',
name='text_lang2',
field=models.TextField(help_text='The text for this question in the secondary language.', verbose_name='Text (secondary)'),
),
migrations.AlterField(
model_name='question',
name='verbose_name_lang1',
field=models.CharField(blank=True, help_text='The name displayed for this question in the primary language.', max_length=256, verbose_name='Name (primary)'),
),
migrations.AlterField(
model_name='question',
name='verbose_name_lang2',
field=models.CharField(blank=True, help_text='The name displayed for this question in the secondary language.', max_length=256, verbose_name='Name (secondary)'),
),
migrations.AlterField(
model_name='question',
name='verbose_name_plural_lang1',
field=models.CharField(blank=True, help_text='The plural name displayed for this question in the primary language.', max_length=256, verbose_name='Plural name (primary)'),
),
migrations.AlterField(
model_name='question',
name='verbose_name_plural_lang2',
field=models.CharField(blank=True, help_text='The plural name displayed for this question in the secondary language.', max_length=256, verbose_name='Plural name (secondary)'),
),
migrations.AlterField(
model_name='questionset',
name='help_lang1',
field=models.TextField(blank=True, help_text='The help text for this questionset in the primary language.', null=True, verbose_name='Help (primary)'),
),
migrations.AlterField(
model_name='questionset',
name='help_lang2',
field=models.TextField(blank=True, help_text='The help text for this questionset in the secondary language.', null=True, verbose_name='Help (secondary)'),
),
migrations.AlterField(
model_name='questionset',
name='title_lang1',
field=models.CharField(help_text='The title for this questionset in the primary language.', max_length=256, verbose_name='Title (primary)'),
),
migrations.AlterField(
model_name='questionset',
name='title_lang2',
field=models.CharField(help_text='The title for this questionset in the secondary language.', max_length=256, verbose_name='Title (secondary)'),
),
migrations.AlterField(
model_name='questionset',
name='verbose_name_lang1',
field=models.CharField(blank=True, help_text='The name displayed for this question in the primary language.', max_length=256, verbose_name='Name (primary)'),
),
migrations.AlterField(
model_name='questionset',
name='verbose_name_lang2',
field=models.CharField(blank=True, help_text='The name displayed for this question in the secondary language.', max_length=256, verbose_name='Name (secondary)'),
),
migrations.AlterField(
model_name='questionset',
name='verbose_name_plural_lang1',
field=models.CharField(blank=True, help_text='The plural name displayed for this question in the primary language.', max_length=256, verbose_name='Plural name (primary)'),
),
migrations.AlterField(
model_name='questionset',
name='verbose_name_plural_lang2',
field=models.CharField(blank=True, help_text='The plural name displayed for this question in the secondary language.', max_length=256, verbose_name='Plural name (secondary)'),
),
migrations.AlterField(
model_name='section',
name='title_lang1',
field=models.CharField(help_text='The title for this section in the primary language.', max_length=256, verbose_name='Title (primary)'),
),
migrations.AlterField(
model_name='section',
name='title_lang2',
field=models.CharField(help_text='The title for this section in the secondary language.', max_length=256, verbose_name='Title (secondary)'),
),
]
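# Applying this auto-generated migration follows the standard Django workflow,
# e.g.: python manage.py migrate questions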
| 50.413793
| 187
| 0.642784
| 664
| 5,848
| 5.487952
| 0.09488
| 0.084523
| 0.137212
| 0.159166
| 0.935785
| 0.935785
| 0.935785
| 0.888035
| 0.876235
| 0.855379
| 0
| 0.019345
| 0.248632
| 5,848
| 115
| 188
| 50.852174
| 0.809968
| 0.011799
| 0
| 0.796296
| 1
| 0
| 0.350935
| 0.021295
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.018519
| 0
| 0.046296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4bdf74b1b4bc15ed09e631921bdae57d07964b62
| 28,610
|
py
|
Python
|
tests/test_clickhouse_sql_rewriter.py
|
Agile-Data/flat-ql
|
3212ae9d0ec4ba822c065bb5e4beccf9e936971b
|
[
"MIT"
] | 3
|
2022-03-21T05:03:39.000Z
|
2022-03-23T01:32:51.000Z
|
tests/test_clickhouse_sql_rewriter.py
|
Agile-Data/flat-ql
|
3212ae9d0ec4ba822c065bb5e4beccf9e936971b
|
[
"MIT"
] | null | null | null |
tests/test_clickhouse_sql_rewriter.py
|
Agile-Data/flat-ql
|
3212ae9d0ec4ba822c065bb5e4beccf9e936971b
|
[
"MIT"
] | null | null | null |
import os

import pytest

from flatql import SqlRewriter, parse_from_hocon_path, parse_flatql
from flatql.rewriter.clickhouse_sql_rewriter import ClickhouseSqlRewriterFactory
recruitment_schema = parse_from_hocon_path(f"{os.path.dirname(__file__)}/schemas/recruitment")
function_call_rewriter_factory = ClickhouseSqlRewriterFactory()
def test_aggregate_query1():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql('SELECT AVG(ResumeHumaninfo.age) AS "平均年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT if(isNaN("qu_0"."co_3"), 0, "qu_0"."co_3") AS "平均年龄", "qu_1"."co_2" AS "频道数量" FROM (SELECT SUM("qu_2"."co_1") AS "co_2" FROM (SELECT "ta_0"."openId" AS "co_0" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_3" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT("ta_0"."name") AS "co_1" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_2" ON "qu_3"."co_0" = "qu_2"."co_0") AS "qu_1" CROSS JOIN (SELECT AVG("ta_1"."ageNormalized") AS "co_3" FROM "v_resume" AS "ta_1") AS "qu_0"'
def test_aggregate_query2():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql('SELECT MIN(ResumeHumaninfo.age) AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT "qu_0"."co_3" AS "最小年龄", "qu_1"."co_2" AS "频道数量" FROM (SELECT SUM("qu_2"."co_1") AS "co_2" FROM (SELECT "ta_0"."openId" AS "co_0" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_3" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT("ta_0"."name") AS "co_1" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_2" ON "qu_3"."co_0" = "qu_2"."co_0") AS "qu_1" CROSS JOIN (SELECT MIN("ta_1"."ageNormalized") AS "co_3" FROM "v_resume" AS "ta_1") AS "qu_0"'
def test_aggregate_query3():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT min_if(ResumeHumaninfo.age, ResumeHumaninfo.name IS NOT NULL) AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT "qu_0"."co_3" AS "最小年龄", "qu_1"."co_2" AS "频道数量" FROM (SELECT SUM("qu_2"."co_1") AS "co_2" FROM (SELECT "ta_0"."openId" AS "co_0" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_3" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT("ta_0"."name") AS "co_1" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_2" ON "qu_3"."co_0" = "qu_2"."co_0") AS "qu_1" CROSS JOIN (SELECT minIf("ta_1"."ageNormalized", "ta_1"."name" IS NOT NULL) AS "co_3" FROM "v_resume" AS "ta_1") AS "qu_0"'
def test_aggregate_query4():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT max_if(ResumeHumaninfo.age, ResumeHumaninfo.name IS NOT NULL) AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT "qu_0"."co_3" AS "最小年龄", "qu_1"."co_2" AS "频道数量" FROM (SELECT SUM("qu_2"."co_1") AS "co_2" FROM (SELECT "ta_0"."openId" AS "co_0" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_3" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT("ta_0"."name") AS "co_1" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_2" ON "qu_3"."co_0" = "qu_2"."co_0") AS "qu_1" CROSS JOIN (SELECT maxIf("ta_1"."ageNormalized", "ta_1"."name" IS NOT NULL) AS "co_3" FROM "v_resume" AS "ta_1") AS "qu_0"'
def test_aggregate_query5():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT count_if(ResumeHumaninfo.name, ResumeHumaninfo.name IS NOT NULL) AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT "qu_0"."co_5" AS "最小年龄", "qu_1"."co_2" AS "频道数量" FROM (SELECT SUM("qu_2"."co_1") AS "co_2" FROM (SELECT "ta_0"."openId" AS "co_0" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_3" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT("ta_0"."name") AS "co_1" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_2" ON "qu_3"."co_0" = "qu_2"."co_0") AS "qu_1" CROSS JOIN (SELECT SUM("qu_4"."co_4") AS "co_5" FROM (SELECT "ta_1"."openId" AS "co_3" FROM "v_resume" AS "ta_1" GROUP BY "co_3") AS "qu_5" LEFT JOIN (SELECT "ta_1"."openId" AS "co_3", countIf("ta_1"."name", "ta_1"."name" IS NOT NULL) AS "co_4" FROM "v_resume" AS "ta_1" GROUP BY "co_3") AS "qu_4" ON "qu_5"."co_3" = "qu_4"."co_3") AS "qu_0"'
def test_aggregate_query6():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT sum_if(ResumeHumaninfo.age, ResumeHumaninfo.name IS NOT NULL) AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT "qu_0"."co_5" AS "最小年龄", "qu_1"."co_2" AS "频道数量" FROM (SELECT SUM("qu_2"."co_1") AS "co_2" FROM (SELECT "ta_0"."openId" AS "co_0" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_3" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT("ta_0"."name") AS "co_1" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_2" ON "qu_3"."co_0" = "qu_2"."co_0") AS "qu_1" CROSS JOIN (SELECT SUM("qu_4"."co_4") AS "co_5" FROM (SELECT "ta_1"."openId" AS "co_3" FROM "v_resume" AS "ta_1" GROUP BY "co_3") AS "qu_5" LEFT JOIN (SELECT "ta_1"."openId" AS "co_3", sumIf("ta_1"."ageNormalized", "ta_1"."name" IS NOT NULL) AS "co_4" FROM "v_resume" AS "ta_1" GROUP BY "co_3") AS "qu_4" ON "qu_5"."co_3" = "qu_4"."co_3") AS "qu_0"'
def test_aggregate_query7():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT to_year(ResumeHumaninfo.createdAt), sum_if(ResumeHumaninfo.age, ResumeHumaninfo.name IS NOT NULL) AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT "qu_0"."co_1", "qu_0"."co_6" AS "最小年龄", "qu_1"."co_3" AS "频道数量" FROM (SELECT "qu_2"."co_1" AS "co_1", SUM("qu_3"."co_2") AS "co_3" FROM (SELECT "ta_0"."openId" AS "co_0", toYear("ta_1"."entityCreatedAt") AS "co_1" FROM "v_channels" AS "ta_0" INNER JOIN "v_projects" AS "ta_2" ON "ta_0"."openId" = "ta_2"."channelOpenId" INNER JOIN "v_flow" AS "ta_3" ON "ta_2"."openId" = "ta_3"."circuitForeignId" INNER JOIN "v_resume" AS "ta_1" ON "ta_1"."openId" = "ta_3"."beanSourceId" GROUP BY "co_0", "co_1") AS "qu_2" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT("ta_0"."name") AS "co_2" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_3" ON "qu_2"."co_0" = "qu_3"."co_0" GROUP BY "co_1") AS "qu_1" INNER JOIN (SELECT "qu_4"."co_1" AS "co_1", SUM("qu_5"."co_5") AS "co_6" FROM (SELECT "ta_1"."openId" AS "co_4", toYear("ta_1"."entityCreatedAt") AS "co_1" FROM "v_resume" AS "ta_1" GROUP BY "co_4", "co_1") AS "qu_4" LEFT JOIN (SELECT "ta_1"."openId" AS "co_4", sumIf("ta_1"."ageNormalized", "ta_1"."name" IS NOT NULL) AS "co_5" FROM "v_resume" AS "ta_1" GROUP BY "co_4") AS "qu_5" ON "qu_4"."co_4" = "qu_5"."co_4" GROUP BY "co_1") AS "qu_0" ON "qu_1"."co_1" = "qu_0"."co_1"'
def test_aggregate_query8():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql('SELECT round(AVG(ResumeHumaninfo.age), 2) AS "平均年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT round(if(isNaN("qu_0"."co_3"), 0, "qu_0"."co_3"), 2) AS "平均年龄", "qu_1"."co_2" AS "频道数量" FROM (SELECT SUM("qu_2"."co_1") AS "co_2" FROM (SELECT "ta_0"."openId" AS "co_0" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_3" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT("ta_0"."name") AS "co_1" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_2" ON "qu_3"."co_0" = "qu_2"."co_0") AS "qu_1" CROSS JOIN (SELECT AVG("ta_1"."ageNormalized") AS "co_3" FROM "v_resume" AS "ta_1") AS "qu_0"'
def test_aggregate_query9():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT to_quarter(ResumeHumaninfo.createdAt), sum_if(ResumeHumaninfo.age, ResumeHumaninfo.name IS NOT NULL) AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT "qu_0"."co_1", "qu_0"."co_6" AS "最小年龄", "qu_1"."co_3" AS "频道数量" FROM (SELECT "qu_2"."co_1" AS "co_1", SUM("qu_3"."co_2") AS "co_3" FROM (SELECT "ta_0"."openId" AS "co_0", toQuarter("ta_1"."entityCreatedAt") AS "co_1" FROM "v_channels" AS "ta_0" INNER JOIN "v_projects" AS "ta_2" ON "ta_0"."openId" = "ta_2"."channelOpenId" INNER JOIN "v_flow" AS "ta_3" ON "ta_2"."openId" = "ta_3"."circuitForeignId" INNER JOIN "v_resume" AS "ta_1" ON "ta_1"."openId" = "ta_3"."beanSourceId" GROUP BY "co_0", "co_1") AS "qu_2" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT("ta_0"."name") AS "co_2" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_3" ON "qu_2"."co_0" = "qu_3"."co_0" GROUP BY "co_1") AS "qu_1" INNER JOIN (SELECT "qu_4"."co_1" AS "co_1", SUM("qu_5"."co_5") AS "co_6" FROM (SELECT "ta_1"."openId" AS "co_4", toQuarter("ta_1"."entityCreatedAt") AS "co_1" FROM "v_resume" AS "ta_1" GROUP BY "co_4", "co_1") AS "qu_4" LEFT JOIN (SELECT "ta_1"."openId" AS "co_4", sumIf("ta_1"."ageNormalized", "ta_1"."name" IS NOT NULL) AS "co_5" FROM "v_resume" AS "ta_1" GROUP BY "co_4") AS "qu_5" ON "qu_4"."co_4" = "qu_5"."co_4" GROUP BY "co_1") AS "qu_0" ON "qu_1"."co_1" = "qu_0"."co_1"'
def test_aggregate_query10():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT to_month(ResumeHumaninfo.createdAt), sum_if(ResumeHumaninfo.age, ResumeHumaninfo.name IS NOT NULL) AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT "qu_0"."co_1", "qu_0"."co_6" AS "最小年龄", "qu_1"."co_3" AS "频道数量" FROM (SELECT "qu_2"."co_1" AS "co_1", SUM("qu_3"."co_2") AS "co_3" FROM (SELECT "ta_0"."openId" AS "co_0", toMonth("ta_1"."entityCreatedAt") AS "co_1" FROM "v_channels" AS "ta_0" INNER JOIN "v_projects" AS "ta_2" ON "ta_0"."openId" = "ta_2"."channelOpenId" INNER JOIN "v_flow" AS "ta_3" ON "ta_2"."openId" = "ta_3"."circuitForeignId" INNER JOIN "v_resume" AS "ta_1" ON "ta_1"."openId" = "ta_3"."beanSourceId" GROUP BY "co_0", "co_1") AS "qu_2" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT("ta_0"."name") AS "co_2" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_3" ON "qu_2"."co_0" = "qu_3"."co_0" GROUP BY "co_1") AS "qu_1" INNER JOIN (SELECT "qu_4"."co_1" AS "co_1", SUM("qu_5"."co_5") AS "co_6" FROM (SELECT "ta_1"."openId" AS "co_4", toMonth("ta_1"."entityCreatedAt") AS "co_1" FROM "v_resume" AS "ta_1" GROUP BY "co_4", "co_1") AS "qu_4" LEFT JOIN (SELECT "ta_1"."openId" AS "co_4", sumIf("ta_1"."ageNormalized", "ta_1"."name" IS NOT NULL) AS "co_5" FROM "v_resume" AS "ta_1" GROUP BY "co_4") AS "qu_5" ON "qu_4"."co_4" = "qu_5"."co_4" GROUP BY "co_1") AS "qu_0" ON "qu_1"."co_1" = "qu_0"."co_1"'
def test_aggregate_query11():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT to_week(ResumeHumaninfo.createdAt), sum_if(ResumeHumaninfo.age, ResumeHumaninfo.name IS NOT NULL) AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT "qu_0"."co_1", "qu_0"."co_6" AS "最小年龄", "qu_1"."co_3" AS "频道数量" FROM (SELECT "qu_2"."co_1" AS "co_1", SUM("qu_3"."co_2") AS "co_3" FROM (SELECT "ta_0"."openId" AS "co_0", toWeek("ta_1"."entityCreatedAt") AS "co_1" FROM "v_channels" AS "ta_0" INNER JOIN "v_projects" AS "ta_2" ON "ta_0"."openId" = "ta_2"."channelOpenId" INNER JOIN "v_flow" AS "ta_3" ON "ta_2"."openId" = "ta_3"."circuitForeignId" INNER JOIN "v_resume" AS "ta_1" ON "ta_1"."openId" = "ta_3"."beanSourceId" GROUP BY "co_0", "co_1") AS "qu_2" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT("ta_0"."name") AS "co_2" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_3" ON "qu_2"."co_0" = "qu_3"."co_0" GROUP BY "co_1") AS "qu_1" INNER JOIN (SELECT "qu_4"."co_1" AS "co_1", SUM("qu_5"."co_5") AS "co_6" FROM (SELECT "ta_1"."openId" AS "co_4", toWeek("ta_1"."entityCreatedAt") AS "co_1" FROM "v_resume" AS "ta_1" GROUP BY "co_4", "co_1") AS "qu_4" LEFT JOIN (SELECT "ta_1"."openId" AS "co_4", sumIf("ta_1"."ageNormalized", "ta_1"."name" IS NOT NULL) AS "co_5" FROM "v_resume" AS "ta_1" GROUP BY "co_4") AS "qu_5" ON "qu_4"."co_4" = "qu_5"."co_4" GROUP BY "co_1") AS "qu_0" ON "qu_1"."co_1" = "qu_0"."co_1"'
def test_aggregate_query12():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT if_null_or_empty(Tenant.name, \'无名称\') AS "租户", sum_if(ResumeHumaninfo.age, ResumeHumaninfo.name IS NOT NULL) AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT "qu_0"."co_1" AS "租户", "qu_0"."co_6" AS "最小年龄", "qu_1"."co_3" AS "频道数量" FROM (SELECT "qu_2"."co_1" AS "co_1", SUM("qu_3"."co_2") AS "co_3" FROM (SELECT "ta_0"."openId" AS "co_0", CASE WHEN empty("ta_1"."name") THEN \'无名称\' ELSE "ta_1"."name" END AS "co_1" FROM "v_channels" AS "ta_0" INNER JOIN "v_tenant" AS "ta_1" ON "ta_0"."tenant" = "ta_1"."name" GROUP BY "co_0", "co_1") AS "qu_2" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT("ta_0"."name") AS "co_2" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_3" ON "qu_2"."co_0" = "qu_3"."co_0" GROUP BY "co_1") AS "qu_1" INNER JOIN (SELECT "qu_4"."co_1" AS "co_1", SUM("qu_5"."co_5") AS "co_6" FROM (SELECT "ta_2"."openId" AS "co_4", CASE WHEN empty("ta_1"."name") THEN \'无名称\' ELSE "ta_1"."name" END AS "co_1" FROM "v_channels" AS "ta_0" INNER JOIN "v_projects" AS "ta_3" ON "ta_0"."openId" = "ta_3"."channelOpenId" INNER JOIN "v_flow" AS "ta_4" ON "ta_3"."openId" = "ta_4"."circuitForeignId" INNER JOIN "v_resume" AS "ta_2" ON "ta_2"."openId" = "ta_4"."beanSourceId" INNER JOIN "v_tenant" AS "ta_1" ON "ta_0"."tenant" = "ta_1"."name" GROUP BY "co_4", "co_1") AS "qu_4" LEFT JOIN (SELECT "ta_2"."openId" AS "co_4", sumIf("ta_2"."ageNormalized", "ta_2"."name" IS NOT NULL) AS "co_5" FROM "v_resume" AS "ta_2" GROUP BY "co_4") AS "qu_5" ON "qu_4"."co_4" = "qu_5"."co_4" GROUP BY "co_1") AS "qu_0" ON "qu_1"."co_1" = "qu_0"."co_1"'
def test_aggregate_query13():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT if_null(Tenant.name, \'无名称\') AS "租户", sum_if(ResumeHumaninfo.age, ResumeHumaninfo.name IS NOT NULL) AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT "qu_0"."co_1" AS "租户", "qu_0"."co_6" AS "最小年龄", "qu_1"."co_3" AS "频道数量" FROM (SELECT "qu_2"."co_1" AS "co_1", SUM("qu_3"."co_2") AS "co_3" FROM (SELECT "ta_0"."openId" AS "co_0", CASE WHEN "ta_1"."name" IS NULL THEN \'无名称\' ELSE "ta_1"."name" END AS "co_1" FROM "v_channels" AS "ta_0" INNER JOIN "v_tenant" AS "ta_1" ON "ta_0"."tenant" = "ta_1"."name" GROUP BY "co_0", "co_1") AS "qu_2" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT("ta_0"."name") AS "co_2" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_3" ON "qu_2"."co_0" = "qu_3"."co_0" GROUP BY "co_1") AS "qu_1" INNER JOIN (SELECT "qu_4"."co_1" AS "co_1", SUM("qu_5"."co_5") AS "co_6" FROM (SELECT "ta_2"."openId" AS "co_4", CASE WHEN "ta_1"."name" IS NULL THEN \'无名称\' ELSE "ta_1"."name" END AS "co_1" FROM "v_channels" AS "ta_0" INNER JOIN "v_projects" AS "ta_3" ON "ta_0"."openId" = "ta_3"."channelOpenId" INNER JOIN "v_flow" AS "ta_4" ON "ta_3"."openId" = "ta_4"."circuitForeignId" INNER JOIN "v_resume" AS "ta_2" ON "ta_2"."openId" = "ta_4"."beanSourceId" INNER JOIN "v_tenant" AS "ta_1" ON "ta_0"."tenant" = "ta_1"."name" GROUP BY "co_4", "co_1") AS "qu_4" LEFT JOIN (SELECT "ta_2"."openId" AS "co_4", sumIf("ta_2"."ageNormalized", "ta_2"."name" IS NOT NULL) AS "co_5" FROM "v_resume" AS "ta_2" GROUP BY "co_4") AS "qu_5" ON "qu_4"."co_4" = "qu_5"."co_4" GROUP BY "co_1") AS "qu_0" ON "qu_1"."co_1" = "qu_0"."co_1"'
def test_aggregate_query14():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql('SELECT round(AVG(ResumeHumaninfo.age), 2) + 100 AS "平均年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT round(if(isNaN("qu_0"."co_3"), 0, "qu_0"."co_3"), 2) + 100 AS "平均年龄", "qu_1"."co_2" AS "频道数量" FROM (SELECT SUM("qu_2"."co_1") AS "co_2" FROM (SELECT "ta_0"."openId" AS "co_0" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_3" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT("ta_0"."name") AS "co_1" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_2" ON "qu_3"."co_0" = "qu_2"."co_0") AS "qu_1" CROSS JOIN (SELECT AVG("ta_1"."ageNormalized") AS "co_3" FROM "v_resume" AS "ta_1") AS "qu_0"'
def test_aggregate_query15():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql('SELECT CASE WHEN ResumeHumaninfo.age BETWEEN 0 AND 30 THEN \'青年\' ELSE \'壮年\' END AS "年龄段", '
'COUNT(CASE WHEN Channel.name IS NOT NULL THEN 1 ELSE NULL END) AS "频道数量" '
'FROM recruitment WHERE has_any(ResumeHumaninfo.emails, [\'abc\', \'def\', \'hjk\'])').rewrite(
sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT "qu_0"."co_1" AS "年龄段", "qu_0"."co_3" AS "频道数量" FROM (SELECT "qu_1"."co_1" AS "co_1", SUM("qu_2"."co_2") AS "co_3" FROM (SELECT "ta_0"."openId" AS "co_0", CASE WHEN "ta_1"."ageNormalized" BETWEEN 0 AND 30 THEN \'青年\' ELSE \'壮年\' END AS "co_1" FROM "v_channels" AS "ta_0" INNER JOIN "v_projects" AS "ta_2" ON "ta_0"."openId" = "ta_2"."channelOpenId" INNER JOIN "v_flow" AS "ta_3" ON "ta_2"."openId" = "ta_3"."circuitForeignId" INNER JOIN "v_resume" AS "ta_1" ON "ta_1"."openId" = "ta_3"."beanSourceId" WHERE hasAny("ta_1"."emails", [\'abc\',\'def\',\'hjk\']) GROUP BY "co_0", "co_1") AS "qu_1" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT(CASE WHEN "ta_0"."name" IS NOT NULL THEN 1 ELSE NULL END) AS "co_2" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_2" ON "qu_1"."co_0" = "qu_2"."co_0" GROUP BY "co_1") AS "qu_0"'
def test_aggregate_query16():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql('SELECT CASE WHEN ResumeHumaninfo.age BETWEEN 0 AND 30 THEN \'青年\' ELSE \'壮年\' END AS "年龄段", '
'COUNT(CASE WHEN Channel.name IS NOT NULL THEN 1 ELSE NULL END) AS "频道数量" '
'FROM recruitment WHERE has_all(ResumeHumaninfo.emails, [\'abc\', \'def\', \'hjk\'])').rewrite(
sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT "qu_0"."co_1" AS "年龄段", "qu_0"."co_3" AS "频道数量" FROM (SELECT "qu_1"."co_1" AS "co_1", SUM("qu_2"."co_2") AS "co_3" FROM (SELECT "ta_0"."openId" AS "co_0", CASE WHEN "ta_1"."ageNormalized" BETWEEN 0 AND 30 THEN \'青年\' ELSE \'壮年\' END AS "co_1" FROM "v_channels" AS "ta_0" INNER JOIN "v_projects" AS "ta_2" ON "ta_0"."openId" = "ta_2"."channelOpenId" INNER JOIN "v_flow" AS "ta_3" ON "ta_2"."openId" = "ta_3"."circuitForeignId" INNER JOIN "v_resume" AS "ta_1" ON "ta_1"."openId" = "ta_3"."beanSourceId" WHERE hasAll("ta_1"."emails", [\'abc\',\'def\',\'hjk\']) GROUP BY "co_0", "co_1") AS "qu_1" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT(CASE WHEN "ta_0"."name" IS NOT NULL THEN 1 ELSE NULL END) AS "co_2" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_2" ON "qu_1"."co_0" = "qu_2"."co_0" GROUP BY "co_1") AS "qu_0"'
def test_aggregate_query17():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql('SELECT CASE WHEN ResumeHumaninfo.age BETWEEN 0 AND 30 THEN \'青年\' ELSE \'壮年\' END AS "年龄段", '
'COUNT(CASE WHEN Channel.name IS NOT NULL THEN 1 ELSE NULL END) AS "频道数量" '
'FROM recruitment WHERE has(ResumeHumaninfo.emails, \'abc\')').rewrite(sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT "qu_0"."co_1" AS "年龄段", "qu_0"."co_3" AS "频道数量" FROM (SELECT "qu_1"."co_1" AS "co_1", SUM("qu_2"."co_2") AS "co_3" FROM (SELECT "ta_0"."openId" AS "co_0", CASE WHEN "ta_1"."ageNormalized" BETWEEN 0 AND 30 THEN \'青年\' ELSE \'壮年\' END AS "co_1" FROM "v_channels" AS "ta_0" INNER JOIN "v_projects" AS "ta_2" ON "ta_0"."openId" = "ta_2"."channelOpenId" INNER JOIN "v_flow" AS "ta_3" ON "ta_2"."openId" = "ta_3"."circuitForeignId" INNER JOIN "v_resume" AS "ta_1" ON "ta_1"."openId" = "ta_3"."beanSourceId" WHERE has("ta_1"."emails", \'abc\') GROUP BY "co_0", "co_1") AS "qu_1" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT(CASE WHEN "ta_0"."name" IS NOT NULL THEN 1 ELSE NULL END) AS "co_2" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_2" ON "qu_1"."co_0" = "qu_2"."co_0" GROUP BY "co_1") AS "qu_0"'
def test_aggregate_query18():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT to_quarter(ResumeHumaninfo.createdAt), avg_if(ResumeHumaninfo.age, ResumeHumaninfo.name IS NOT NULL) AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT "qu_0"."co_1", if(isNaN("qu_0"."co_4"), 0, "qu_0"."co_4") AS "最小年龄", "qu_1"."co_3" AS "频道数量" FROM (SELECT "qu_2"."co_1" AS "co_1", SUM("qu_3"."co_2") AS "co_3" FROM (SELECT "ta_0"."openId" AS "co_0", toQuarter("ta_1"."entityCreatedAt") AS "co_1" FROM "v_channels" AS "ta_0" INNER JOIN "v_projects" AS "ta_2" ON "ta_0"."openId" = "ta_2"."channelOpenId" INNER JOIN "v_flow" AS "ta_3" ON "ta_2"."openId" = "ta_3"."circuitForeignId" INNER JOIN "v_resume" AS "ta_1" ON "ta_1"."openId" = "ta_3"."beanSourceId" GROUP BY "co_0", "co_1") AS "qu_2" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT("ta_0"."name") AS "co_2" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_3" ON "qu_2"."co_0" = "qu_3"."co_0" GROUP BY "co_1") AS "qu_1" INNER JOIN (SELECT toQuarter("ta_1"."entityCreatedAt") AS "co_1", avgIf("ta_1"."ageNormalized", "ta_1"."name" IS NOT NULL) AS "co_4" FROM "v_resume" AS "ta_1" GROUP BY "co_1") AS "qu_0" ON "qu_1"."co_1" = "qu_0"."co_1"'
def test_aggregate_query19():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT date_format(ResumeHumaninfo.createdAt, \'%Y-%m-%d\'), avg_if(ResumeHumaninfo.age, ResumeHumaninfo.name IS NOT NULL) AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT "qu_0"."co_1", if(isNaN("qu_0"."co_4"), 0, "qu_0"."co_4") AS "最小年龄", "qu_1"."co_3" AS "频道数量" FROM (SELECT "qu_2"."co_1" AS "co_1", SUM("qu_3"."co_2") AS "co_3" FROM (SELECT "ta_0"."openId" AS "co_0", formatDateTime("ta_1"."entityCreatedAt", \'%Y-%m-%d\') AS "co_1" FROM "v_channels" AS "ta_0" INNER JOIN "v_projects" AS "ta_2" ON "ta_0"."openId" = "ta_2"."channelOpenId" INNER JOIN "v_flow" AS "ta_3" ON "ta_2"."openId" = "ta_3"."circuitForeignId" INNER JOIN "v_resume" AS "ta_1" ON "ta_1"."openId" = "ta_3"."beanSourceId" GROUP BY "co_0", "co_1") AS "qu_2" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT("ta_0"."name") AS "co_2" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_3" ON "qu_2"."co_0" = "qu_3"."co_0" GROUP BY "co_1") AS "qu_1" INNER JOIN (SELECT formatDateTime("ta_1"."entityCreatedAt", \'%Y-%m-%d\') AS "co_1", avgIf("ta_1"."ageNormalized", "ta_1"."name" IS NOT NULL) AS "co_4" FROM "v_resume" AS "ta_1" GROUP BY "co_1") AS "qu_0" ON "qu_1"."co_1" = "qu_0"."co_1"'
def test_aggregate_query20():
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT to_year(now()) - to_year(ResumeHumaninfo.createdAt), avg_if(ResumeHumaninfo.age, ResumeHumaninfo.name IS NOT NULL) AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
assert sql_rewriter.to_sql() == 'SELECT "qu_0"."co_1", if(isNaN("qu_0"."co_4"), 0, "qu_0"."co_4") AS "最小年龄", "qu_1"."co_3" AS "频道数量" FROM (SELECT "qu_2"."co_1" AS "co_1", SUM("qu_3"."co_2") AS "co_3" FROM (SELECT "ta_0"."openId" AS "co_0", toYear(now()) - toYear("ta_1"."entityCreatedAt") AS "co_1" FROM "v_channels" AS "ta_0" INNER JOIN "v_projects" AS "ta_2" ON "ta_0"."openId" = "ta_2"."channelOpenId" INNER JOIN "v_flow" AS "ta_3" ON "ta_2"."openId" = "ta_3"."circuitForeignId" INNER JOIN "v_resume" AS "ta_1" ON "ta_1"."openId" = "ta_3"."beanSourceId" GROUP BY "co_0", "co_1") AS "qu_2" LEFT JOIN (SELECT "ta_0"."openId" AS "co_0", COUNT("ta_0"."name") AS "co_2" FROM "v_channels" AS "ta_0" GROUP BY "co_0") AS "qu_3" ON "qu_2"."co_0" = "qu_3"."co_0" GROUP BY "co_1") AS "qu_1" INNER JOIN (SELECT toYear(now()) - toYear("ta_1"."entityCreatedAt") AS "co_1", avgIf("ta_1"."ageNormalized", "ta_1"."name" IS NOT NULL) AS "co_4" FROM "v_resume" AS "ta_1" GROUP BY "co_1") AS "qu_0" ON "qu_1"."co_1" = "qu_0"."co_1"'
def test_clickhouse_function_check1():
    with pytest.raises(SyntaxError):
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT to_quarter(ResumeHumaninfo.createdAt), avg_if(ResumeHumaninfo.name IS NOT NULL, ResumeHumaninfo.age) AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
with pytest.raises(SyntaxError):
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT to_quarter(ResumeHumaninfo.createdAt), avg_if() AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
with pytest.raises(SyntaxError):
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT to_quarter(ResumeHumaninfo.createdAt), sum_if() AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
with pytest.raises(SyntaxError):
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT to_quarter(ResumeHumaninfo.createdAt), min_if() AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
with pytest.raises(SyntaxError):
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT to_quarter(ResumeHumaninfo.createdAt), max_if() AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
with pytest.raises(SyntaxError):
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT to_quarter(ResumeHumaninfo.createdAt), count_if() AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
with pytest.raises(SyntaxError):
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT to_quarter(ResumeHumaninfo.createdAt), if_null_or_empty() AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
with pytest.raises(SyntaxError):
sql_rewriter = SqlRewriter(recruitment_schema, function_call_rewriter_factory=function_call_rewriter_factory)
parse_flatql(
'SELECT to_quarter(ResumeHumaninfo.createdAt), if_null() AS "最小年龄", COUNT(Channel.name) AS "频道数量" '
'FROM recruitment').rewrite(sql_rewriter)
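All of the rewrite tests above repeat the same parse-rewrite-assert sequence; as a sketch, a hypothetical helper like assert_rewrites_to could factor it out (SqlRewriter, parse_flatql, recruitment_schema and function_call_rewriter_factory are the names already used in this file):
def assert_rewrites_to(flatql: str, expected_sql: str) -> None:
    # Parse a FlatQL statement, rewrite it against the recruitment schema,
    # and compare the generated ClickHouse SQL with the expectation.
    sql_rewriter = SqlRewriter(recruitment_schema,
                               function_call_rewriter_factory=function_call_rewriter_factory)
    parse_flatql(flatql).rewrite(sql_rewriter)
    assert sql_rewriter.to_sql() == expected_sql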
| avg_line_length: 131.843318 | max_line_length: 1431 | alphanum_fraction: 0.68504 | qsc_* quality-signal values: ... |
hexsha: 4be4539f7638a26ec1575337759c98ebcb7e4a0c | size: 11042 | ext: py | lang: Python | path: models/model.py | repo: iamsofancyyoualreadyknow/IHC-based-labels-generation-and-semantic-segmentation-for-lung-cancer | head_hexsha: 57904544c6d6b43dcd5937afeb474c0a47456d98 | licenses: ["MIT"] | stars/issues/forks counts: null |
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from six.moves import cPickle
import deeplab
import deeplab_v2
import res_deeplab_v2  # referenced by ResNetDeepLabV2Model below; assumed importable alongside deeplab_v2
arg_scope = tf.contrib.framework.arg_scope
class DeepLabLFOVModel(object):
"""DeepLab-LargeFOV model with atrous convolution and bilinear upsampling.
This class implements a multi-layer convolutional neural network for the semantic image segmentation task.
This is the same as the model described in this paper: https://arxiv.org/abs/1412.7062 - please look
there for details.
"""
def __init__(self, number_class=3):
"""Create the model"""
self.n_classes = number_class
def _create_network(self, input_batch, dropout):
"""Construct DeepLab-LargeFOV network.
Args:
input_batch: batch of pre-processed images.
keep_prob: probability of keeping neurons intact.
Returns:
A downsampled segmentation mask.
"""
if dropout is False:
train = False
else:
train = True
net, _ = deeplab.deeplab(input_batch, self.n_classes, train=train, dropout=dropout, weight_decay=0.0005)
return net
def prepare_label(self, input_batch, new_size):
"""Resize masks and perform one-hot encoding.
Args:
input_batch: input tensor of shape [batch_size H W 1].
new_size: a tensor with new height and width.
Returns:
Outputs a tensor of shape [batch_size h w n_classes]
with the last dimension a one-hot encoding (0's and 1's only).
"""
with tf.name_scope('label_encode'):
input_batch = tf.image.resize_nearest_neighbor(input_batch,
new_size) # As labels are integer numbers, need to use NN interp.
input_batch = tf.squeeze(input_batch, axis=[3]) # Reducing the channel dimension.
input_batch = tf.one_hot(input_batch, depth=self.n_classes)
return input_batch
def preds(self, input_batch):
"""Create the network and run inference on the input batch.
Args:
input_batch: batch of pre-processed images.
Returns:
Argmax over the predictions of the network of the same shape as the input.
"""
raw_output = self._create_network(tf.cast(input_batch, tf.float32), dropout=False)
raw_output = tf.image.resize_bilinear(raw_output, tf.shape(input_batch)[1:3, ])
raw_output = tf.argmax(raw_output, axis=3)
raw_output = tf.expand_dims(raw_output, axis=3) # Create 4D-tensor.
return tf.cast(raw_output, tf.uint8)
def loss(self, img_batch, label_batch, mask_batch):
"""Create the network, run inference on the input batch and compute loss.
Args:
input_batch: batch of pre-processed images.
Returns:
Pixel-wise softmax loss.
"""
raw_output = self._create_network(tf.cast(img_batch, tf.float32), dropout=True)
# Get prediction output
raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(img_batch)[1:3, ])
raw_output_up = tf.argmax(raw_output_up, axis=3)
raw_output_up = tf.expand_dims(raw_output_up, axis=3) # Create 4D-tensor.
pred = tf.cast(raw_output_up, tf.uint8)
prediction = tf.reshape(raw_output, [-1, self.n_classes])
# Prepare ground truth output
label_batch = tf.image.resize_nearest_neighbor(label_batch, tf.stack(raw_output.get_shape()[1:3]))
gt = tf.expand_dims(tf.cast(tf.reshape(label_batch, [-1]), tf.int32), axis=1)
# Prepare mask
resized_mask_batch = tf.image.resize_nearest_neighbor(mask_batch, tf.stack(raw_output.get_shape()[1:3]))
resized_mask_batch = tf.cast(tf.reshape(resized_mask_batch, [-1]), tf.float32)
mask = tf.reshape(resized_mask_batch, gt.get_shape())
# Calculate the masked loss
loss = tf.losses.sparse_softmax_cross_entropy(logits=prediction, labels=gt, weights=mask)
return pred, loss
class DeepLabV2Model(object):
def __init__(self, number_class=34):
"""Create the model"""
self.n_classes = number_class
def _create_network(self, input_batch, dropout):
"""Construct DeepLab-LargeFOV network.
Args:
input_batch: batch of pre-processed images.
keep_prob: probability of keeping neurons intact.
Returns:
A downsampled segmentation mask.
"""
if dropout is False:
train = False
else:
train = True
net = deeplab_v2.deeplab_v2(input_batch, self.n_classes, dropout=dropout, weight_decay=0.0005)
return net
def prepare_label(self, input_batch, new_size):
"""Resize masks and perform one-hot encoding.
Args:
input_batch: input tensor of shape [batch_size H W 1].
new_size: a tensor with new height and width.
Returns:
Outputs a tensor of shape [batch_size h w n_classes]
with the last dimension a one-hot encoding (0's and 1's only).
"""
with tf.name_scope('label_encode'):
input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # As labels are integer numbers, need to use NN interp.
input_batch = tf.squeeze(input_batch, axis=[3]) # Reducing the channel dimension.
input_batch = tf.one_hot(input_batch, depth=self.n_classes)
return input_batch
def preds(self, input_batch):
"""Create the network and run inference on the input batch.
Args:
input_batch: batch of pre-processed images.
Returns:
Argmax over the predictions of the network of the same shape as the input.
"""
raw_output = self._create_network(tf.cast(input_batch, tf.float32), dropout=False)
raw_output = tf.image.resize_bilinear(raw_output, tf.shape(input_batch)[1:3,])
raw_output = tf.argmax(raw_output, axis=3)
raw_output = tf.expand_dims(raw_output, axis=3) # Create 4D-tensor.
return tf.cast(raw_output, tf.uint8)
def loss(self, img_batch, label_batch):
"""Create the network, run inference on the input batch and compute loss.
Args:
input_batch: batch of pre-processed images.
Returns:
Pixel-wise softmax loss.
"""
raw_output = self._create_network(tf.cast(img_batch, tf.float32), dropout=False)
# Get pred mask
raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(img_batch)[1:3, ])
raw_output_up = tf.argmax(raw_output_up, axis=3)
raw_output_up = tf.expand_dims(raw_output_up, axis=3) # Create 4D-tensor.
pred = tf.cast(raw_output_up, tf.uint8)
# Compute the loss
prediction = tf.reshape(raw_output, [-1, self.n_classes])
# Need to resize labels and convert using one-hot encoding.
label_batch = self.prepare_label(label_batch, tf.stack(raw_output.get_shape()[1:3]))
gt = tf.reshape(label_batch, [-1, self.n_classes])
# Pixel-wise softmax loss.
loss = tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=gt)
reduced_loss = tf.reduce_mean(loss)
return pred, reduced_loss
class ResNetDeepLabV2Model(object):
def __init__(self, number_class=34):
"""Create the model"""
self.n_classes = number_class
def _create_network(self, input_batch, is_training):
"""Construct DeepLab-LargeFOV network.
Args:
input_batch: batch of pre-processed images.
keep_prob: probability of keeping neurons intact.
Returns:
A downsampled segmentation mask.
"""
# DeepLab lfov
# net, endpoints = deeplab(input_batch, self.n_classes, train = train, dropout = dropout, weight_decay = 0.0005)
# DeepLab V2
net = res_deeplab_v2.deeplab_v2(input_batch, self.n_classes, is_taining=is_training, weight_decay=0.0005)
return net
def prepare_label(self, input_batch, new_size):
"""Resize masks and perform one-hot encoding.
Args:
input_batch: input tensor of shape [batch_size H W 1].
new_size: a tensor with new height and width.
Returns:
Outputs a tensor of shape [batch_size h w n_classes]
with the last dimension a one-hot encoding (0's and 1's only).
"""
with tf.name_scope('label_encode'):
input_batch = tf.image.resize_nearest_neighbor(input_batch,
new_size) # As labels are integer numbers, need to use NN interp.
input_batch = tf.squeeze(input_batch, axis=[3]) # Reducing the channel dimension.
input_batch = tf.one_hot(input_batch, depth=self.n_classes)
return input_batch
def preds(self, input_batch, update_BN=False):
"""Create the network and run inference on the input batch.
Args:
input_batch: batch of pre-processed images.
Returns:
Argmax over the predictions of the network of the same shape as the input.
"""
raw_output = self._create_network(tf.cast(input_batch, tf.float32), is_training=update_BN)
raw_output = tf.image.resize_bilinear(raw_output, tf.shape(input_batch)[1:3, ])
raw_output = tf.argmax(raw_output, axis=3)
raw_output = tf.expand_dims(raw_output, axis=3) # Create 4D-tensor.
return tf.cast(raw_output, tf.uint8)
def loss(self, img_batch, label_batch):
"""Create the network, run inference on the input batch and compute loss.
Args:
input_batch: batch of pre-processed images.
Returns:
Pixel-wise softmax loss.
"""
raw_output = self._create_network(tf.cast(img_batch, tf.float32), is_training=True)
# Get pred mask
raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(img_batch)[1:3, ])
raw_output_up = tf.argmax(raw_output_up, axis=3)
raw_output_up = tf.expand_dims(raw_output_up, axis=3) # Create 4D-tensor.
pred = tf.cast(raw_output_up, tf.uint8)
# Compute the loss
prediction = tf.reshape(raw_output, [-1, self.n_classes])
# Need to resize labels and convert using one-hot encoding.
label_batch = self.prepare_label(label_batch, tf.stack(raw_output.get_shape()[1:3]))
gt = tf.reshape(label_batch, [-1, self.n_classes])
# Pixel-wise softmax loss.
loss = tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=gt)
reduced_loss = tf.reduce_mean(loss)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if update_ops:
updates = tf.group(*update_ops)
reduced_loss = control_flow_ops.with_dependencies([updates], reduced_loss)
return pred, reduced_loss
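A minimal sketch of how these graph-mode classes would be wired into a TF1 training loop, assuming the deeplab backbone module resolves; the input resolution, optimiser and learning rate below are illustrative only:
model = DeepLabLFOVModel(number_class=3)
images = tf.placeholder(tf.float32, [None, 321, 321, 3])  # illustrative input resolution
labels = tf.placeholder(tf.uint8, [None, 321, 321, 1])
masks = tf.placeholder(tf.uint8, [None, 321, 321, 1])
pred, loss = model.loss(images, labels, masks)
train_op = tf.train.MomentumOptimizer(1e-3, 0.9).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # sess.run([train_op, loss], feed_dict={images: ..., labels: ..., masks: ...})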
| avg_line_length: 39.435714 | max_line_length: 137 | alphanum_fraction: 0.643452 | qsc_* quality-signal values: ... |
hexsha: ef079f774564373acb99ad1915fc626d422b0f96 | size: 198 | ext: py | lang: Python | path: tests/python-reference/builtin/len.py | repo: jpolitz/lambda-py-paper | head_hexsha: 746ef63fc1123714b4adaf78119028afbea7bd76 | licenses: ["Apache-2.0"] | stars: 25 (2015-04-16 ... 2022-03-10) | issues: 1 (2018-11-21 ... 2018-11-26) | forks: 1 (2021-03-26) |
___assertEqual(len('123'), 3)
___assertEqual(len(()), 0)
___assertEqual(len((1, 2, 3, 4)), 4)
___assertEqual(len([1, 2, 3, 4]), 4)
___assertEqual(len({}), 0)
___assertEqual(len({'a':1, 'b': 2}), 2)
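The triple-underscore assertion primitive comes from the lambda-py test harness, which is not part of this record; behaviourally it amounts to something like the following sketch:
def ___assertEqual(actual, expected):
    # Minimal stand-in for the lambda-py harness primitive.
    assert actual == expected, "%r != %r" % (actual, expected)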
| avg_line_length: 28.285714 | max_line_length: 39 | alphanum_fraction: 0.621212 | qsc_* quality-signal values: ... |
hexsha: ef4165586432832f8a1d8178b3d00c49cc72eba9 | size: 2067 | ext: py | lang: Python | path: adventofcode/tests/test_day3.py | repo: jcfvalente/adventofcode2020 | head_hexsha: ec0deede4661dd80945d96cb72b034579b9ac62e | licenses: ["MIT"] | stars/issues/forks counts: null |
from adventofcode.day3 import solve_part_one
from adventofcode.day3 import solve_part_two
def test_part_one():
puzzle = ["..##.........##.........##.........##.........##.........##.......",
"#...#...#..#...#...#..#...#...#..#...#...#..#...#...#..#...#...#..",
".#....#..#..#....#..#..#....#..#..#....#..#..#....#..#..#....#..#.",
"..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#",
".#...##..#..#...##..#..#...##..#..#...##..#..#...##..#..#...##..#.",
"..#.##.......#.##.......#.##.......#.##.......#.##.......#.##.....",
".#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#",
".#........#.#........#.#........#.#........#.#........#.#........#",
"#.##...#...#.##...#...#.##...#...#.##...#...#.##...#...#.##...#...",
"#...##....##...##....##...##....##...##....##...##....##...##....#",
".#..#...#.#.#..#...#.#.#..#...#.#.#..#...#.#.#..#...#.#.#..#...#.#"]
assert solve_part_one(puzzle, 3) == 7
def test_part_two():
puzzle = ["..##.........##.........##.........##.........##.........##.......",
"#...#...#..#...#...#..#...#...#..#...#...#..#...#...#..#...#...#..",
".#....#..#..#....#..#..#....#..#..#....#..#..#....#..#..#....#..#.",
"..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#..#.#...#.#",
".#...##..#..#...##..#..#...##..#..#...##..#..#...##..#..#...##..#.",
"..#.##.......#.##.......#.##.......#.##.......#.##.......#.##.....",
".#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#.#.#.#....#",
".#........#.#........#.#........#.#........#.#........#.#........#",
"#.##...#...#.##...#...#.##...#...#.##...#...#.##...#...#.##...#...",
"#...##....##...##....##...##....##...##....##...##....##...##....#",
".#..#...#.#.#..#...#.#.#..#...#.#.#..#...#.#.#..#...#.#.#..#...#.#"]
assert solve_part_two(puzzle) == 336
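The adventofcode.day3 module itself is not part of this record; one plausible implementation consistent with the two assertions above counts '#' cells along a slope on the horizontally repeating grid, with part two multiplying the counts for the five canonical slopes:
def solve_part_one(puzzle, right, down=1):
    # Walk (right, down) steps over the rows; the pattern repeats horizontally.
    width = len(puzzle[0])
    return sum(1 for i, row in enumerate(puzzle[::down])
               if row[(i * right) % width] == '#')

def solve_part_two(puzzle):
    # Product of tree counts over the slopes from the 2020 day-3 puzzle.
    result = 1
    for right, down in [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]:
        result *= solve_part_one(puzzle, right, down)
    return result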
| avg_line_length: 62.636364 | max_line_length: 83 | alphanum_fraction: 0.087567 | qsc_* quality-signal values: ... |
hexsha: 327701cbf140fd1b0f3cb8a80efb01366c5e0e52 | size: 152 | ext: py | lang: Python | path: pylimit/__init__.py | repo: joaomedeiros95/pylimit | head_hexsha: d2170a8c02a9be083f37c9e4ec1e28700a33d64e | licenses: ["Apache-2.0"] | stars: 17 (2016-10-28 ... 2021-07-29) | issues: 5 (2016-11-15 ... 2021-04-20) | forks: 10 (2016-08-09 ... 2021-04-08) |
from pylimit.pyratelimit import PyRateLimit
from pylimit.pyratelimit_exception import PyRateLimitException
from pylimit.redis_helper import RedisHelper
| avg_line_length: 38 | max_line_length: 62 | alphanum_fraction: 0.901316 | qsc_* quality-signal values: ... |
hexsha: 32a99a8d14e5899512947e1ceebac21756a467d0 | size: 700 | ext: py | lang: Python | path: Python/Curos_Python_curemvid/Exercicios_dos_videos/Ex108.py | repo: Jhonattan-rocha/Meus-primeiros-programas | head_hexsha: f5971b66c0afd049b5d0493e8b7a116b391d058e | licenses: ["MIT"] | stars/issues/forks counts: null |
from Curos_Python_curemvid.Uteis import Exer108M
numero = int(input("Enter how much money you have: "))
print(f"Double {Exer108M.formatar(numero)} is: {Exer108M.formatar(Exer108M.dobro(numero))}")
print(f"Half of {Exer108M.formatar(numero)} is: {Exer108M.formatar(Exer108M.metade(numero))}")
por = float(input("Enter the percentage by which to increase the number: "))
print(f"{Exer108M.formatar(numero)} with a {por}% increase is: {Exer108M.formatar(Exer108M.aumentar(numero, por))}")
por = float(input("Enter the percentage by which to decrease the number: "))
print(f"{Exer108M.formatar(numero)} with a {por}% discount is: {Exer108M.formatar(Exer108M.diminuir(numero, por))}")
| avg_line_length: 70 | max_line_length: 115 | alphanum_fraction: 0.754286 | qsc_* quality-signal values: ... |
hexsha: 3eac02688fa8b32ea72c7d2eaa863fcd4fd0cb5e | size: 7267 | ext: py | lang: Python | path: bd9.py | repo: HANTER2/bd-7- | head_hexsha: 471044a699688d6400366fc76cfacf534a12578d | licenses: ["Apache-2.0"] | stars/issues/forks counts: null |
#Encrypted By SOMI BRAND
#WHATSAPP : +923455453538/DON,T TRY TO EDIT THIS TOOL/
import zlib, base64
exec(zlib.decompress(base64.b64decode("eJztXVtv20iWfg5/RbWMhNJaoUTqbsPYkS/peNuxDdtJJusOBEosWYx4UZNUbGdngTzkoYF9mO3ZXNCNbWAXixlsA/O62P0F8zRv+w/yS7ZOkRTvpC6WN7a7mi1TVedSdU7VYfnjiWsFPTUtUVpBx7vbR+3aCjoZjM8UuY9Jja7KXUNfQV+L6puVr0pj0yh1Za00urQGuiYwKz1dkrWzjbHVf9hkZHWkGxbSzaJ5aRYtWcVFSbQwvTFETdLV4kA0B4rcLRq4aA0MLAJ38ZWpa8WxoUBDT9eHMoa7M2yNRNNkdJMj4iys5llDRQ+NPuKsC4stMH3dQBqSNURkn+E8XyalsMYwiBRN7RpoA9laOfgha1aet0sRtexSoLT0g6jgTEvSxxZh00dYy7NUTRGxItFFaUYGCAHRToXHxPWVsTnIOwIt43KN3jkWMfB3Y2xaJoMvenhkoV1avWMYumHT+cY4kkcCGRTxiKIgFfcGoia/wWyC5AnBzKKdPjmCwUecqWA8yvOFMN/JAGvIuhzhNeQ4HnV10wSzXK3LXTsVvWH1DV1F6lix5JGh97BpEm5upOuKa4ETKvOQ1Ni0rgzONoisa6ZLuqVrGu5BFbWPI9tV5VJtGvq5iQ2GYQys6KKUJyMrMNTV2JJwXySdwZo98fMsmflNYgg62yaiOEdGHhqArUPqJQV3DL2rW2b+kaiYONKG+wYmc8iT0hlY1oh7fHJyeGS3Hdom0IngoipedMDMGzwVJErSgNgBGybpyGmefUrUP2yfYY1O4YMRNsRSi2uWUb6tSYYuS+uIVqInsiaXKgJX5gShVi01axx6uo5kqYAOiUpLLwkcL3BVoYKeEeHEdiXyla+zhZdxasegVrTVstui8loelniuzhG9e7I2vlgH4U4PUJWrcsI62v9trYY2x7Iilb45OKnVyvUCOn202d4vPdqsttfJ3bMSXyYyoI91rt4kVZvPStVaq1zl62XybftJ6R8krJmydblR4crFc1myBht8uVkuDrB8NrA2+JZQ/kdCubdVkq3O7gm5PQqI2DoqHepkuj/Ru7KCScWTRyXRHJuga9u9O9wv9Ug46Ys93CWTlhuKlqiJ0IFnpfbx0+PO35fL7W3y/fhZqcaB2IPDEg/S26WLZn1NNFQsduWHrxvi+kuw4Ao6wmdjRTTI3FR0w2S6itgbbuS+LVcqp+X1SlnNIaesoE1oI5NS8tp5rx0ojrDEnBkYax6F4JfwNbQxl1hR9HOPpDIhWUEvaBvpxhh7BFWfFujGGDOjsTFSfCQ1n4xD2sb0LkVfP+oBGVukjTkfyJZPRMPf0+fQRpYgWW9I0aWBbFj5gh3QnK8w3+h3xLKItflgqj+R2oqoytrWQD+XBmMD6m5f65fUl+TWy3Azymifjh1NT/Ctxr6kswQ2CzpsFpzpY88lKPSpjthvjW8v+O4pv96qqiiuuM2CyqJVpBcnAhJ2AVB8j9UyBw9W8h+d00OsjEUDpvQ9R/8JCfpDk2OZe/aTlzy/ZDLnbXKxJw7zXWcBnJOpz4oDS3rzqsfSGglq2Mk4ZRjnhTdCCa2S9q/Y1fPT0IaoXFTITue88JAvvFyVKYOBrbGhoR62iEqp4KxB+2tyDyZqzz21rwjVOSdrEr7Iy55NLjbQBWfgEYlmOM9+dd9k78tFlkaB++Y6r7L3TcvIV/jVVwWb6YJ23w4TKutUBYWU2eKEoBDemp0bJJbkL1ZZMhnc8bwSFVHLvwH7Q9cxdP0NcSvQkLp7EXbyuA7Uup6+d8/vYyjgZ1BhyUPqX0smdxCtWI7OU5bjnB8cIpPz3mRmUkJQbu8zc96MrKiHChZNjJ6LsjWZiCT4r+qF4nq0V+vB7RzDdMmTg3ShzOhD+qB+yciS/bM36to3r8eKpkNUtQMyebTsk6/PSG2OtnktgmrX+nbmuR7pn5EDVSuk7OlnOvxkGIXc8WjDdg7pc01lc7kc8/nj98714dPnjz+4l7/y+/BFKZloVbDG/frh/ecP/wyXS8l4DQGG9xHOYHc+THSShg8/+cT/PAvnR8oMXfrZTxE3ThDNBKl916TeufFfQMwwYZuEDBWweMT0sYZNd5LfWx5l1OxBf0RsFjJeAn22439IpPwQmj7xzKmGic6y7MkUavoU6U14rFm9SaHPnqARyvjJmj5H4z0Y1/uUSR+lDPQmtADS5r2/Pok+cyElSYAahhbP8HD9CUzw/i2t/FO4Hio/xTd5LMnspN5pIgsInGUr/kTvf6BtP8TUe9e/+ToRYkli/8ll+dk2/4SUNPyHz0gRcYlXEsuk3u7lT/TeiXoT0vdUa9K4f3L99XNEa4qpJuPzxsrM0tOJiD+H1cew+M3waULABOfvL0DqGPmXyHT2f2ay/BKYwg7Bz/C0hYewgDbob4arUMF0Ykp0jTO+tn+ZXPG0fib0l//563/99d//91/J53/+9d1f/tvPTq6NiRR667+A29MUbPzh86ffU7ZPv3dXsZ8x0MeQWJseNPpjgJ8bpbBT6h//SNh//GOUPdhlz1a2xk5AKaUP9W1i3wiD1y2oJbd/oN+cT7sq6q0wnUONkv0byxBD7+dK0BIlRz5GlKQsbkahjo/TNsNvI261jRNl9g2Xmfg0Opgp2ZPm+zTsKWsliT008iz2iD2oWf8wD+t8PGlMnQQWb+J5VzjSpBKjSGDy0XbCYtMog66LVhGjvP3x88e38M0O9ZnXx3dwZZK9/SeH2OXqMO5tsiqb3Pn83lGVRO/vCNE2Ifto77Filfl5PrrbtMmnTQBfvW4HW+nNRJnbuThlIZ6wGnqlUPqVvXeJP74Naort3Xuf6LDFAr2e9GMiNNSvZEFhuZ50JoHinedbuGzpvhtPpUMQJyh2HQZ0xLNFOT39CR322N7SXYWNW9DdRoFhtnTDwD0LXkRooooBPxApOpdzmw5F0zzXDUAickJZ4HOAF+gjAJcsY4xZQIgVjPJ2pVvrYFBjT64hnndkbTS28rkdzcIGOoH3Q67iNZSzISG5j/Ie1wYKddCRe2/k9con2AUwGqoPf0E+de5g1pCrz9XpCZzodIkLHlrmQZG5Pf3sDEsACZnjHrzw6Y8V5RKJJsqhVW/gK9v49Zqiv8adgdgbYoO5F4IchUJAumvbPrx9YidNmHyL7YY75Kr63NC1s8kIcwFi33vCC+nsIbw6RfDeylwrlXyvSnq6Wnoi2ROgVq0IvIPTBbUnaHZd5GleQKuDUa2gvd2tnf3jHUS/MSsb4bJiv32QTXiz5L598CmmmJczDDLpZS1foJLp/YpbnFcYtDlVhD12Cpj533Pk/a05D0c75V96iPTxSfvoBG3tHezv7n+NDo92jo8Rn0ceNdrfeY6eHm63T3ZQIed7m00W7sOZC8Og9tOTxwdH1Bdr6Mk2au+1n+zuo63HB8+3Hz89eoEY9HwgWqY4GlGS1WazzDfKtXqjUW7yAIPPrpb0NfK22obVYTiyIg86rh+o1QNVtu1HWByGFrbmWYn0/uB4Z21it4rqhQ6b
cyOXi85Wxw0N9ZGsKGhXc1e5culNWJG+d7ad6Ux8n1A+t4YCJWaiTzDWoHoabiNd0uzIJGqXaFPUzhRRwuYAIfvtJurpEkb7Y7WLjRyFxYP8YJSD/b0XaLO9//Vee3vn+PEuam9tHTzdPzlG7aMd1H7W3t1rb+7thLsDrGRqHDxpn+xuuTxo66i99c3OEdp8ETtVImMiXSJzpcg3+CLiGwJ8VOCjCh81+KjDRwM+mvDRKvIVQl8B+grQV4C+AvQVoK8AfQXoK0BfaXkjnmRUuKWXFPjt2YHW/P2FMtzIwewOhkVZIoED4HQnjyTIApi/Imv0tQNNNrHJizniXw7yGaDRzIceD7ZcjiwprEl5IOFMy5BH+YIn3U0DOfClgASNm8+dfvUSPYJ5AAj/I32sSaERBZbHKZlBvSF6GaKhb6pVsS+LF/lCZEazZdbT7S5Kmyga71mnQ8G1w8auHYelVv4bxHqPiYfOa6ELeDEEr48UalPHLvbLngl5Xf388c98+T7rb80FWskllO/nktntqxISEkdDrmqWrslVCylNpiRXvXwfKF0SXoWVTtf5FvEpmddodxttKbqGMwfivxrTDcp/NWdnIVdrLi6+HGRDfj6yFbNEhSwSE2k0vK0hdpXMinh6EvAhEF7qY9Qb6DqZmIS6F6D1b0Xg3VsbMhPIyjm2REgoMsgagIQkjsvFs1XUE50Qk42Xk7tDs2pMtGUZyuqbXOKUbqirzqtd8hxTRbJ8ROPMFw5gE0imOqlMDmTk0aEOJdnIs6b4GrORGEGCWUyMgJQ7lCwU2nmiGToQaJBESyT1XYOzs+fc3Vj3oTiSgzsyFVsDXSqJY2vA0djwtyLd5nYsfYi1DaHSaNRarXKr1uLrtdp9oSbUGlvlPl8ti2IXS/1uvSb2hIbYqLSwxIuCUK90+QckpqqitQGZZQ9Madh5bacrbfAPMDGgsgFv7Yfk/56zh4a3vQ8UvScqeANrnafHD9yNOiW1Bwo0RNiGrJsPzrCGDdHCHRPS0HStYyeumUSDKZ9tVPq1Wq3fapLu8f2e1BDFcq9a7dea/Zog4L4aegR8R4wFfeVojhlYL9hOYinrNwsLz4rvog8E94k5SV44fdze8naG3+xsv0Tee2SE4szgy3z43YS4rgbMQHYJzo9IF/Rhd5I0CVOt5Pu1xZ9EGcPnvF0Pd+h3YdXxzD2FLNp8XJvpPiV9Uqm8IDF9ZLHn5+eBCWqb+pTFsDw6qnnGvsy0O6+eth+dkD1OY7v94vjKTB5R2xuZQVv3Brg3HOmkM2mmJmzzmxqYk0zdG3WnNXX410xqRUIowC+ljxz7R0d8GwOLsMTAAiUruECZNsBQNy0/yAgZQQbKvIHG4c1aAULiCnAFJK0Cuz0p6AhRhgUCT4w/rjj4CGk+mDMAOayLuSAtENntScEo1gVxAYlalzBUICg1mnVyxdvhNoalypLDEpRpQhOUWcITddryQ1RlihAFZZEw5fBnrZNK6jpxhaStFZsmKWRV4pkWDFsxfrri0FXJ8s0C4cthX9w1WWHMpkkKZYmuSQpn1OqEsQoh7VAcyqYlaolxDcptjG3Va4htUKaNb1BmjXHUk8uPc9Up4xyURWOdIyNrUVUzF5UrKGth2XRJca+azHgFsQ/KcuNfdRqfLRgDHRFX47JpYqFNlxQPU1wGBUhqEPmOxMFY4YVK0Da3MdTVlhjqbhBsVcsIY0uErWqJc38+2Kp2Q2CrWpKtlwdbJZt6PtgqYuok2KoOQcUUL0Xt7G6AVvUl76BuIGhVn2KntGTQqp44/10B84FW9RsGWtXTfLBc0CrdBfODVrEuSAOtGhCS/k6UTUM2RPXu4FaNa/jd7gbjVo0pf5+7BtyqkbpUXCHz41aNG4pbNbJ8s3zcKts1i+FWia7Jwq2aENVeiVpvPIz8+uYvtzGyNa8hskG5BahVc8ooB+WaUKtm5pJyBS2GWjVvOGrVnMZn14NaTeeyxVGrFJdBAZIWxL2RoZ+RvZyKjTsBXbWWGO9uEHTVyohlS4SuWokLYD7oqnVDoKtWkq2XB10lm3o+6Cpi6iToii9DaKGBQJRUOWbQtzG2kFH/CmCF8wzLU+yaloxgTfpw1RAWX75hGNbEEv8PIFaGF+ZHseK9kAZj8ZBgzg51zdKVuwNi8cvMNnfLDUax+Kw0dLdcA4zFJydL+6XMj2PxfDzXFw9k8YmZ625ZPpI1hXcWg7KSvZOFZfE02b1tvBprdw7L4ped9e6WWwBm8dOkw7vlmtCsSZ+WDWfxMdnabrkReNbEUl8AoDWl1xZHtNK8BoXS0LT6Zpmv+/8JNpRbGfCWmU9/g9AsPitxfpn/gDA5HXvOf0EYyr/+YgEtPjEXfon/hjDZ2nP+I8KItRMxLZre3hLqQou/I3jWslPabyKeNU3u+rLxrPTM5wXwrJhU5y8bz0pNR18ynpXuhQXwrFgvpOJZNP98d/tg/+Sb/YPndwjSWmYmultuMqSVlaLuluuAtJITqf1SFoC0avFcXz6klZjV7pZrgLSyvbMgpJXonUxIiybCdwciWdp3DNBadka8W24DoDVNqrxbrgvQSk/c9ktaENCKyeR2y80AtFKz691yTYDWdF67AkArxWtQKA1NuVcv3dhwB0CtZSbb3yRQKyurfpmgVnKu9pygVig5+8sFtRIT5ZcIaiVbe05QK2LtRFCL5r67a/A3dwTXWnbS+03EtabJbl82rpWeG70ArhWTDP1l41qpCetLxrXSvbAArhXrhVRci2aoP8EDLMm8cIdgrWVmqbvlJsNaWenrbrkOWCs5ydovZQFYqxXP9eXDWokZ7265Blgr2zsLwlqJ3smCtYRy8h+N8ZfbGOCEZWfKu+UWAFvCNBn0brkmYEtIz+X2S1oM2BJicrvdciOALSE1494t1wNsTem1xYEtv9fsvwUf9zfgnUJv4NygkwEcTHGo60q+4ggYcao4ysOfpy8iWXLqon/MnnfPZ3B86f4p/MeiiTYxhjMf1JGCLSwhjuMCpPbf8z/Q4LCL0kG/Tw/NgD/T7x7yQOZmobDKlrwaMu6Cryt5lh6BIKF2rwfHIvjUHhNXSkSa7VJK5TurwzmBYV+3QOMLfWwgtwOiLQo9hzMrDuDsIbEPx63wZWTpiPhREi/NwB/3Z/2jgsOy4JvvjMGMs8PcM7liz1mdSkoSc5y4lOO+vo/VHLimkhUrKHbU2eKmkTWDxOnFzSk0U2Ks3GyhmRLjuH4VGjF50tl56Y4KnRmZNj+mmQaxXAHRKREhRXpWHGGyRccqmJqFmYo0Nr5MzcLMEO7mu2bTMcc4ZtMBciMnxd9BHdNQz6djrrk78yBmWYBz6JghfsyhYNrAN4f0mP3NNOEvc8XFyJ1e9GxCp/fqDBLnE5omblaJGbJmm8tTboQXlTLtY3A+KTPwR0XMxuznn5nzvbdzmZEtUdw7V+I7//1VSI/bY11p92POP/YOQr4i+8SouULRQSv9KvdXuTdFrhMu3l253Ml1x+UGLOvEHVK8czNPBqI2BIyLYlPMBPeLPSdZUE+9Wzgb1jtP0z0
k1jnxFflgRUbuo04HTvHudOhh7p0OAIedjnNQrHtI7P8B0aT6GQ==")))
| avg_line_length: 1816.75 | max_line_length: 7167 | alphanum_fraction: 0.964084 | qsc_* quality-signal values: ... |
hexsha: 411f9951d937d6eb761ef347f05ea8a7249c1701 | size: 128 | ext: py | lang: Python | path: bayesiantesting/utils/__init__.py | repo: SimonBoothroyd/bayesiantesting | head_hexsha: d9602eb23c74884e6cc53b0c8533b65f7b315278 | licenses: ["MIT"] | stars: 1 (2020-03-25) | issues: 19 (2019-11-21 ... 2021-09-13) | forks: null |
from .utils import get_data_filename, temporarily_change_directory
__all__ = ["get_data_filename", "temporarily_change_directory"]  # __all__ entries must be strings
| avg_line_length: 32 | max_line_length: 66 | alphanum_fraction: 0.875 | qsc_* quality-signal values: ... |
hexsha: f5e76b56af1cba58f1024d105badc89864eed6ed | size: 355 | ext: py | lang: Python | path: tests/internal/instance_type/test_instance_type_inf_auto.py | repo: frolovv/aws.ec2.compare | head_hexsha: 582805823492f833d65c0441c4a14dce697c12aa | licenses: ["Apache-2.0"] | stars: null | issues: null | forks: 1 (2021-12-15) |
# Testing module instance_type.inf
import pytest
import ec2_compare.internal.instance_type.inf
def test_get_internal_data_instance_type_inf_get_instances_list():
assert len(ec2_compare.internal.instance_type.inf.get_instances_list()) > 0
def test_get_internal_data_instance_type_inf_get():
assert len(ec2_compare.internal.instance_type.inf.get) > 0
| avg_line_length: 35.5 | max_line_length: 77 | alphanum_fraction: 0.850704 | qsc_* quality-signal values: ... |
hexsha: f5ec27d5619c14ae5b046f5eb31721572d6aee8b | size: 158 | ext: py | lang: Python | path: vmraid/patches/v12_0/setup_email_linking.py | repo: sowrisurya/vmraid | head_hexsha: f833e00978019dad87af80b41279c0146c063ed5 | licenses: ["MIT"] | stars/issues/forks counts: null |
from __future__ import unicode_literals
from vmraid.desk.page.setup_wizard.install_fixtures import setup_email_linking
def execute():
setup_email_linking()
| avg_line_length: 26.333333 | max_line_length: 78 | alphanum_fraction: 0.860759 | qsc_* quality-signal values: ... |