hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f0bad83a86533791a2456ac5e8d65f96bbbff66f
| 216
|
py
|
Python
|
python/src/component/__init__.py
|
wwitzel3/octant-example-plugins
|
f22107282496cf8a7202a73e47fb9149027dd7cb
|
[
"Apache-2.0"
] | 8
|
2020-03-04T15:53:45.000Z
|
2021-09-03T01:29:42.000Z
|
python/src/component/__init__.py
|
wwitzel3/octant-example-plugins
|
f22107282496cf8a7202a73e47fb9149027dd7cb
|
[
"Apache-2.0"
] | null | null | null |
python/src/component/__init__.py
|
wwitzel3/octant-example-plugins
|
f22107282496cf8a7202a73e47fb9149027dd7cb
|
[
"Apache-2.0"
] | 5
|
2020-02-19T17:07:24.000Z
|
2021-06-03T22:37:18.000Z
|
from component.view import View
from component.text import Text
from component.title import Title
from component.list import List
from component.link import Link
from component.card import (
Card,
CardList,
)
| 24
| 33
| 0.796296
| 31
| 216
| 5.548387
| 0.322581
| 0.453488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157407
| 216
| 9
| 34
| 24
| 0.945055
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f0e28db647a5b5fc43fe516ce3b901c0ec62d1b7
| 48
|
py
|
Python
|
test/login.py
|
zhonghuasweet/test21
|
b130baa8f5feea5bf1acc75e9d5376b342d08548
|
[
"MIT"
] | null | null | null |
test/login.py
|
zhonghuasweet/test21
|
b130baa8f5feea5bf1acc75e9d5376b342d08548
|
[
"MIT"
] | null | null | null |
test/login.py
|
zhonghuasweet/test21
|
b130baa8f5feea5bf1acc75e9d5376b342d08548
|
[
"MIT"
] | null | null | null |
num =10
num = 20
num = 30
num = 40
num = 50 dev
| 8
| 12
| 0.583333
| 11
| 48
| 2.545455
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.30303
| 0.3125
| 48
| 5
| 13
| 9.6
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f0f10b827959863b0aa42df5fa35aca1d17a3dd7
| 2,570
|
py
|
Python
|
test/python/tests/test_random.py
|
bh107/bohrium
|
5b83e7117285fefc7779ed0e9acb0f8e74c7e068
|
[
"Apache-2.0"
] | 236
|
2015-03-31T15:39:30.000Z
|
2022-03-24T01:43:14.000Z
|
test/python/tests/test_random.py
|
bh107/bohrium
|
5b83e7117285fefc7779ed0e9acb0f8e74c7e068
|
[
"Apache-2.0"
] | 324
|
2015-05-27T10:35:38.000Z
|
2021-12-10T07:34:10.000Z
|
test/python/tests/test_random.py
|
bh107/bohrium
|
5b83e7117285fefc7779ed0e9acb0f8e74c7e068
|
[
"Apache-2.0"
] | 41
|
2015-05-26T12:38:42.000Z
|
2022-01-10T15:16:37.000Z
|
import util
class test_random_nontrivial:
def init(self):
cmd_bh = "R = M.random.RandomState(42); "
for shape in [10, (10,), (10, 11)]:
cmd_np = "res = np.zeros(%s, dtype=np.bool); " % repr(shape)
cmd_np += "res.flat[0] = True; "
for dtype in util.TYPES.FLOAT:
yield cmd_np, cmd_bh, shape, dtype
def test_random(self, arg):
cmd_np, cmd_bh, shape, dtype = arg
cmd_bh += "a = R.random(%s, dtype=%s); " % (shape, dtype)
cmd_bh += "res = a == a.flatten()[0]"
return cmd_np, cmd_bh
def test_rand(self, arg):
cmd_np, cmd_bh, shape, dtype = arg
if isinstance(shape, int):
shape = (shape,)
cmd_bh += "a = R.rand(*%s, dtype=%s); " % (shape, dtype)
cmd_bh += "res = a == a.flatten()[0]"
return cmd_np, cmd_bh
def test_standard_normal(self, arg):
cmd_np, cmd_bh, shape, dtype = arg
cmd_bh += "a = R.standard_normal(%s, dtype=%s); " % (shape, dtype)
cmd_bh += "res = a == a.flatten()[0]"
return cmd_np, cmd_bh
def test_randn(self, arg):
cmd_np, cmd_bh, shape, dtype = arg
if isinstance(shape, int):
shape = (shape,)
cmd_bh += "a = R.randn(*%s, dtype=%s); " % (shape, dtype)
cmd_bh += "res = a == a.flatten()[0]"
return cmd_np, cmd_bh
def test_standard_exponential(self, arg):
cmd_np, cmd_bh, shape, dtype = arg
cmd_bh += "a = R.standard_exponential(%s, dtype=%s); " % (shape, dtype)
cmd_bh += "res = a == a.flatten()[0]"
return cmd_np, cmd_bh
def test_randint(self, arg):
cmd_np, cmd_bh, shape, dtype = arg
cmd_bh += "a = R.randint(1000, size=%s, dtype=%s); " % (shape, dtype)
cmd_bh += "res = a == a.flatten()[0]"
return cmd_np, cmd_bh
def test_random_integers(self, arg):
cmd_np, cmd_bh, shape, dtype = arg
cmd_bh += "a = R.random_integers(1000, size=%s, dtype=%s); " % (shape, dtype)
cmd_bh += "res = a == a.flatten()[0]"
return cmd_np, cmd_bh
def test_uniform(self, arg):
cmd_np, cmd_bh, shape, dtype = arg
cmd_bh += "a = R.uniform(0, 10, size=%s, dtype=%s); " % (shape, dtype)
cmd_bh += "res = a == a.flatten()[0]"
return cmd_np, cmd_bh
def test_normal(self, arg):
cmd_np, cmd_bh, shape, dtype = arg
cmd_bh += "a = R.normal(0, 10, size=%s, dtype=%s); " % (shape, dtype)
cmd_bh += "res = a == a.flatten()[0]"
return cmd_np, cmd_bh
| 36.714286
| 85
| 0.535798
| 387
| 2,570
| 3.361757
| 0.129199
| 0.146042
| 0.116833
| 0.146042
| 0.7794
| 0.7794
| 0.764028
| 0.764028
| 0.764028
| 0.764028
| 0
| 0.018878
| 0.299222
| 2,570
| 69
| 86
| 37.246377
| 0.703498
| 0
| 0
| 0.534483
| 0
| 0
| 0.249416
| 0.036965
| 0
| 0
| 0
| 0
| 0
| 1
| 0.172414
| false
| 0
| 0.017241
| 0
| 0.362069
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0b0994818747947561ff51834b288de828a2fcad
| 7,090
|
py
|
Python
|
vg/test_shape.py
|
lace/vx
|
33134cae43d7729b6128b198119e1593035066ae
|
[
"BSD-2-Clause"
] | 100
|
2019-01-18T05:08:34.000Z
|
2022-03-24T09:59:11.000Z
|
vg/test_shape.py
|
lace/vg
|
bece5191756b43378e882fd1fdf0ffa45a06e467
|
[
"BSD-2-Clause"
] | 153
|
2018-11-16T17:44:28.000Z
|
2022-03-10T23:33:50.000Z
|
vg/test_shape.py
|
lace/vx
|
33134cae43d7729b6128b198119e1593035066ae
|
[
"BSD-2-Clause"
] | 14
|
2019-05-17T15:05:52.000Z
|
2022-03-09T08:42:53.000Z
|
import numpy as np
import pytest
from vg.shape import check, check_value, check_value_any, columnize
def test_check_value_valid():
check_value(np.zeros(3), (3,))
def test_check_value_valid_scalar():
check_value(np.int64(3), ())
def test_check_value_valid_wildcard():
assert check_value(np.zeros((5, 3)), (-1, 3)) == 5
assert check_value(np.zeros((5, 3)), (5, -1)) == 3
assert check_value(np.zeros((5, 3, 2)), (-1, 3, -1)) == (5, 2)
def test_check_value_wrong_shape():
with pytest.raises(ValueError) as e:
check_value(np.zeros(4), (3,))
assert "Expected an array with shape (3,); got (4,)" in str(e.value)
def test_check_value_wrong_shape_wildcard():
with pytest.raises(ValueError) as e:
check_value(np.zeros((5, 4)), (-1, 3))
assert "Expected an array with shape (-1, 3); got (5, 4)" in str(e.value)
def test_check_value_none():
with pytest.raises(ValueError) as e:
check_value(None, (3,))
assert "Expected an array with shape (3,); got None" in str(e.value)
def test_check_value_wrong_type():
with pytest.raises(ValueError) as e:
check_value({}, (3,))
assert "Expected an array with shape (3,); got dict" in str(e.value)
class Value:
def __init__(self):
self.shape = None
with pytest.raises(ValueError) as e:
check_value(Value(), (3,))
assert "Expected an array with shape (3,); got Value" in str(e.value)
def test_check_value_valid_named():
check_value(np.zeros(3), (3,), name="input_value")
def test_check_value_valid_wildcard_named():
assert check_value(np.zeros((5, 3)), (-1, 3), name="input_value") == 5
assert check_value(np.zeros((5, 3)), (5, -1), name="input_value") == 3
def test_check_value_wrong_shape_named():
with pytest.raises(ValueError) as e:
check_value(np.zeros(4), (3,), name="input_value")
assert "input_value must be an array with shape (3,); got (4,)" in str(e.value)
def test_check_value_wrong_shape_wildcard_named():
with pytest.raises(ValueError) as e:
check_value(np.zeros((5, 4)), (-1, 3), name="input_value")
assert "input_value must be an array with shape (-1, 3); got (5, 4)" in str(e.value)
def test_check_value_none_named():
with pytest.raises(ValueError) as e:
check_value(None, (3,), name="input_value")
assert "input_value must be an array with shape (3,); got None" in str(e.value)
def test_check_value_with_invalid_shape_raises_expected_error():
with pytest.raises(ValueError) as e:
check_value(np.zeros(3), (3.0,))
assert "Expected shape dimensions to be int" in str(e.value)
def test_check_value_any_valid():
assert check_value_any(np.zeros((3,)), (3,), (-1, 3), name="points") is None
assert check_value_any(np.zeros((12, 3)), (3,), (-1, 3), name="points") == 12
assert check_value_any(np.zeros((0, 3)), (3,), (-1, 3), name="points") == 0
assert check_value_any(
np.zeros((5, 3, 3)), (-1, 3), (-1, -1, 3), name="points"
) == (5, 3)
def test_check_value_any_errors():
with pytest.raises(ValueError, match="At least one shape is required"):
check_value_any(np.zeros(9).reshape(-3, 3))
with pytest.raises(
ValueError, match=r"Expected an array with shape \(3,\) or \(-1, 3\); got list"
):
check_value_any([1, 2, 3], (3,), (-1, 3))
with pytest.raises(
ValueError, match=r"Expected an array with shape \(3,\); got list"
):
check_value_any([1, 2, 3], (3,))
def test_check_value_any_message():
with pytest.raises(
ValueError,
match=r"^Expected an array with shape \(-1, 2\) or \(2,\); got \(3, 3\)$",
):
check_value_any(np.zeros(9).reshape(-3, 3), (-1, 2), (2,))
with pytest.raises(
ValueError,
match=r"^Expected coords to be an array with shape \(-1, 2\) or \(2,\); got \(3, 3\)$",
):
check_value_any(np.zeros(9).reshape(-3, 3), (-1, 2), (2,), name="coords")
with pytest.raises(
ValueError,
match=r"^Expected coords to be an array with shape \(-1, 2\) or \(2,\); got None$",
):
check_value_any(None, (-1, 2), (2,), name="coords")
def test_check():
input_value = np.zeros(3)
check(locals(), "input_value", (3,))
def test_check_valid_wildcard():
input_value = np.zeros((5, 3))
assert check(locals(), "input_value", (-1, 3)) == 5
assert check(locals(), "input_value", (5, -1)) == 3
input_value = np.zeros((5, 3, 2))
assert check(locals(), "input_value", (-1, 3, -1)) == (5, 2)
def test_check_wrong_shape_named():
input_value = np.zeros(4)
with pytest.raises(ValueError) as e:
check(locals(), "input_value", (3,))
assert "input_value must be an array with shape (3,); got (4,)" in str(e.value)
def test_check_wrong_shape_wildcard_named():
input_value = np.zeros((5, 4))
with pytest.raises(ValueError) as e:
check(locals(), "input_value", (-1, 3))
assert "input_value must be an array with shape (-1, 3); got (5, 4)" in str(e.value)
def test_check_none_named():
input_value = None
with pytest.raises(ValueError) as e:
check(locals(), "input_value", (3,))
assert "input_value must be an array with shape (3,); got None" in str(e.value)
def test_columnize_with_2d_shape():
shape = (-1, 3)
columnized, is_columnized, transform_result = columnize(
np.array([1.0, 0.0, 0.0]), shape
)
np.testing.assert_array_equal(columnized, np.array([[1.0, 0.0, 0.0]]))
assert columnized.shape == (1, 3)
assert is_columnized is False
assert transform_result([1.0]) == 1.0
columnized, is_columnized, transform_result = columnize(
np.array([[1.0, 0.0, 0.0]]), shape
)
np.testing.assert_array_equal(columnized, np.array([[1.0, 0.0, 0.0]]))
assert columnized.shape == (1, 3)
assert is_columnized is True
assert transform_result([1.0]) == [1.0]
def test_columnize_with_3d_shape():
shape = (-1, 3, 3)
columnized, is_columnized, transform_result = columnize(
np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]), shape
)
np.testing.assert_array_equal(
columnized, np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])
)
assert columnized.shape == (1, 3, 3)
assert is_columnized is False
assert transform_result([1.0]) == 1.0
columnized, is_columnized, transform_result = columnize(
np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]), shape
)
np.testing.assert_array_equal(
columnized, np.array([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]])
)
assert columnized.shape == (1, 3, 3)
assert is_columnized is True
assert transform_result([1.0]) == [1.0]
def test_columnize_invalid_shape():
with pytest.raises(ValueError, match="shape should be a tuple"):
columnize(np.array([1.0, 0.0, 0.0]), "this is not a shape")
with pytest.raises(ValueError, match="shape should have at least two dimension"):
columnize(np.array([1.0, 0.0, 0.0]), (3,))
| 33.761905
| 95
| 0.623836
| 1,146
| 7,090
| 3.692845
| 0.078534
| 0.106333
| 0.059546
| 0.122873
| 0.878544
| 0.831994
| 0.728733
| 0.706285
| 0.654773
| 0.603497
| 0
| 0.056086
| 0.202821
| 7,090
| 209
| 96
| 33.923445
| 0.692675
| 0
| 0
| 0.366013
| 0
| 0.019608
| 0.16897
| 0
| 0
| 0
| 0
| 0
| 0.261438
| 1
| 0.163399
| false
| 0
| 0.019608
| 0
| 0.189542
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9bdb83670a9680bc1640e1b4f523c7cf826d32c2
| 10,205
|
py
|
Python
|
tests/unittests/test_colony_filters.py
|
Siegallab/PIE
|
54b4dfd3fe340b1bc69187dacf8c6b583714d65b
|
[
"MIT"
] | 2
|
2021-03-24T03:05:27.000Z
|
2022-02-18T06:10:30.000Z
|
tests/unittests/test_colony_filters.py
|
Siegallab/PIE
|
54b4dfd3fe340b1bc69187dacf8c6b583714d65b
|
[
"MIT"
] | null | null | null |
tests/unittests/test_colony_filters.py
|
Siegallab/PIE
|
54b4dfd3fe340b1bc69187dacf8c6b583714d65b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import unittest
import numpy as np
import pandas as pd
from numpy.testing import assert_equal
from pandas.testing import assert_frame_equal
from PIE import colony_filters, analysis_configuration
areas_df = pd.DataFrame(
np.array(
[[1, 2, 1.5, 4],
[2, 5, 15, 16],
[np.nan, np.nan, 1, 2],
[0, 3, 8, 7.9],
[1, 1, 1, 2]]),
index = ['col_1', 'col_2', 'col_3', 'col_4', 'col_5'],
columns = [1,2,3,4])
class TestFilterByMaxAreaPixelDecrease(unittest.TestCase):
'''
Tests filtering by max area pixel decrease
'''
def setUp(self):
self.analysis_config = object.__new__(analysis_configuration.AnalysisConfig)
def test_filter_by_max_area_pixel_decrease_small(self):
'''
Test allowing areas to decrease by a small amount
'''
self.analysis_config.max_area_pixel_decrease = 0.4
filter_obj = colony_filters._FilterByMaxAreaPixelDecrease(areas_df,
self.analysis_config)
test_filter_bool = filter_obj._filtration_method()
expected_filter_bool = np.array([
[1, 1, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]], dtype = bool)
assert_equal(expected_filter_bool, test_filter_bool)
def test_filter_by_max_area_pixel_decrease_zero(self):
'''
Test not allowing areas to decrease by any amount
'''
self.analysis_config.max_area_pixel_decrease = 0
filter_obj = colony_filters._FilterByMaxAreaPixelDecrease(areas_df,
self.analysis_config)
test_filter_bool = filter_obj._filtration_method()
expected_filter_bool = np.array([
[1, 1, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 0],
[1, 1, 1, 1]], dtype = bool)
assert_equal(expected_filter_bool, test_filter_bool)
def test_filter_by_max_area_pixel_decrease_inf(self):
'''
Test allowing areas to decrease by any amount
'''
self.analysis_config.max_area_pixel_decrease = np.inf
filter_obj = colony_filters._FilterByMaxAreaPixelDecrease(areas_df,
self.analysis_config)
test_filter_bool = filter_obj._filtration_method()
expected_filter_bool = np.ones(areas_df.shape, dtype = bool)
assert_equal(expected_filter_bool, test_filter_bool)
class test_filter_data(unittest.TestCase):
'''
Tests generic data filtration
'''
def _test_filter_max_area_fold_increase(self):
'''
Tests filtration by max_area_fold_increase
'''
filtration_type = 'max_area_fold_increase'
max_area_fold_increase = 2.5
expected_filter_pass_bool = np.array([
[1, 1, 1, 0],
[1, 1, 0, 0],
[1, 1, 1, 1],
[1, 0, 0, 0],
[1, 1, 1, 1]], dtype = bool)
expected_removed_locations = [('col_1', 4), ('col_2', 3), ('col_4', 2)]
test_filter_pass_bool, test_removed_locations = \
colony_filters.filter_data(filtration_type, areas_df,
max_area_fold_increase)
self.assertEqual(expected_removed_locations, test_removed_locations)
assert_equal(expected_filter_pass_bool, test_filter_pass_bool)
class test_filter_by_max_area_fold_decrease(unittest.TestCase):
'''
Tests filtering by max area fold decrease
'''
def setUp(self):
self.analysis_config = object.__new__(analysis_configuration.AnalysisConfig)
def test_filter_by_max_area_fold_decrease_small(self):
'''
Test allowing areas to decrease by a small amount
'''
self.analysis_config.max_area_fold_decrease = 1.2
filter_obj = colony_filters._FilterByMaxAreaFoldDecrease(areas_df,
self.analysis_config)
test_filter_bool = filter_obj._filtration_method()
expected_filter_bool = np.array([
[1, 1, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]], dtype = bool)
assert_equal(expected_filter_bool, test_filter_bool)
def test_filter_by_max_area_fold_decrease_very_small(self):
'''
Test not allowing areas to decrease by another small amount
'''
self.analysis_config.max_area_fold_decrease = 1.01
filter_obj = colony_filters._FilterByMaxAreaFoldDecrease(areas_df,
self.analysis_config)
test_filter_bool = filter_obj._filtration_method()
expected_filter_bool = np.array([
[1, 1, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 0],
[1, 1, 1, 1]], dtype = bool)
assert_equal(expected_filter_bool, test_filter_bool)
def test_filter_by_max_area_fold_decrease_one(self):
'''
Test not allowing areas to decrease by any amount
'''
self.analysis_config.max_area_fold_decrease = 1
filter_obj = colony_filters._FilterByMaxAreaFoldDecrease(areas_df,
self.analysis_config)
test_filter_bool = filter_obj._filtration_method()
expected_filter_bool = np.array([
[1, 1, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 0],
[1, 1, 1, 1]], dtype = bool)
assert_equal(expected_filter_bool, test_filter_bool)
def test_filter_by_max_area_fold_decrease_inf(self):
'''
Test allowing areas to decrease by any amount
'''
self.analysis_config.max_area_fold_decrease = np.inf
filter_obj = colony_filters._FilterByMaxAreaFoldDecrease(areas_df,
self.analysis_config)
test_filter_bool = filter_obj._filtration_method()
expected_filter_bool = np.ones(areas_df.shape, dtype = bool)
assert_equal(expected_filter_bool, test_filter_bool)
class test_filter_by_max_area_fold_increase(unittest.TestCase):
'''
Tests filtering by max area fold increase
'''
def setUp(self):
self.analysis_config = object.__new__(analysis_configuration.AnalysisConfig)
def test_filter_by_max_area_fol_increase_small(self):
'''
Test allowing areas to increase by a small amount
'''
self.analysis_config.max_area_fold_increase = 2.5
filter_obj = colony_filters._FilterByMaxAreaFoldIncrease(areas_df,
self.analysis_config)
test_filter_bool = filter_obj._filtration_method()
expected_filter_bool = np.array([
[1, 1, 1, 0],
[1, 1, 0, 0],
[1, 1, 1, 1],
[1, 0, 0, 0],
[1, 1, 1, 1]], dtype = bool)
assert_equal(expected_filter_bool, test_filter_bool)
def test_filter_by_max_area_fold_increase_one(self):
'''
Test not allowing areas to increase by any amount
'''
self.analysis_config.max_area_fold_increase = 1
filter_obj = colony_filters._FilterByMaxAreaFoldIncrease(areas_df,
self.analysis_config)
test_filter_bool = filter_obj._filtration_method()
expected_filter_bool = np.array([
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 1, 1, 0],
[1, 0, 0, 0],
[1, 1, 1, 0]], dtype = bool)
assert_equal(expected_filter_bool, test_filter_bool)
def test_filter_by_max_area_fold_increase_inf(self):
'''
Test allowing areas to increase by any amount
'''
self.analysis_config.max_area_fold_increase = np.inf
filter_obj = colony_filters._FilterByMaxAreaFoldIncrease(areas_df,
self.analysis_config)
test_filter_bool = filter_obj._filtration_method()
expected_filter_bool = np.ones(areas_df.shape, dtype = bool)
assert_equal(expected_filter_bool, test_filter_bool)
class TestFilterByGrowthWindowTimepoints(unittest.TestCase):
'''
Tests filtering by growth window timepoints
'''
def setUp(self):
self.analysis_config = object.__new__(analysis_configuration.AnalysisConfig)
self.areas_with_nulls_df = pd.DataFrame(np.array([
[4, 6, 9, np.nan, np.nan, 11, 14, np.nan],
[np.nan, np.nan, 2, 3, 5, 8, 11, np.nan],
[1, 1, 2, 3, 5, 8, 12, 14]]),
index = ['col_1', 'col_2', 'col_4'],
columns = [1,2,3,4,5,6,7,8])
def test_window_size_2(self):
'''
Tests growth_window_timepoints of 2, which results in everything
passing the filter
'''
self.analysis_config.growth_window_timepoints = 2
filter_obj = colony_filters._FilterByGrowthWindowTimepoints(
self.areas_with_nulls_df, self.analysis_config)
test_filter_bool = filter_obj._filtration_method()
expected_filter_bool = np.array([
[1, 1, 1, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1]], dtype = bool)
assert_equal(expected_filter_bool, test_filter_bool)
def test_window_size_3(self):
'''
Tests growth_window_timepoints of 3, which removes only two
timepoints from the row corresponding to col_1
'''
self.analysis_config.growth_window_timepoints = 3
filter_obj = colony_filters._FilterByGrowthWindowTimepoints(
self.areas_with_nulls_df, self.analysis_config)
test_filter_bool = filter_obj._filtration_method()
expected_filter_bool = np.array([
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1]], dtype = bool)
assert_equal(expected_filter_bool, test_filter_bool)
def test_window_size_8(self):
'''
Tests growth_window_timepoints of 8, which removes everything
except the last row
'''
self.analysis_config.growth_window_timepoints = 8
filter_obj = colony_filters._FilterByGrowthWindowTimepoints(
self.areas_with_nulls_df, self.analysis_config)
test_filter_bool = filter_obj._filtration_method()
expected_filter_bool = np.array([
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1]], dtype = bool)
assert_equal(expected_filter_bool, test_filter_bool)
def test_window_size_8(self):
'''
Tests growth_window_timepoints of 20, which removes everything
'''
self.analysis_config.growth_window_timepoints = 20
filter_obj = colony_filters._FilterByGrowthWindowTimepoints(
self.areas_with_nulls_df, self.analysis_config)
test_filter_bool = filter_obj._filtration_method()
expected_filter_bool = \
np.zeros(self.areas_with_nulls_df.shape, dtype = bool)
assert_equal(expected_filter_bool, test_filter_bool)
class TestIdFilteredLocations(unittest.TestCase):
'''
Tests that correct tuples of areas_df index and column name is
returned for a given filter_pass bool matrix
'''
def setUp(self):
self.analysis_config = object.__new__(analysis_configuration.AnalysisConfig)
self.filter_obj = \
colony_filters._FilterBaseClass(areas_df, self.analysis_config)
def test_id_filtered_locations_simple(self):
filter_bool = np.array([
[1, 0, 0, 0],
[1, 1, 0, 1],
[1, 1, 1, 0],
[1, 1, 1, 1],
[0, 0, 0, 1]], dtype = bool)
expected_filtered_locations = \
pd.DataFrame(np.array([2,3,4,1]),
index = ['col_1', 'col_2', 'col_3', 'col_5'],
columns = ['filtered_columns'])
test_filtered_locations = \
self.filter_obj._id_removed_locations(filter_bool)
assert_frame_equal(expected_filtered_locations,
test_filtered_locations)
if __name__ == '__main__':
unittest.main()
| 32.5
| 78
| 0.72876
| 1,542
| 10,205
| 4.470169
| 0.089494
| 0.041201
| 0.046134
| 0.046424
| 0.801683
| 0.767735
| 0.727405
| 0.709851
| 0.676483
| 0.666618
| 0
| 0.041691
| 0.153846
| 10,205
| 314
| 79
| 32.5
| 0.756572
| 0.117785
| 0
| 0.607477
| 0
| 0
| 0.013786
| 0.002507
| 0
| 0
| 0
| 0
| 0.088785
| 1
| 0.098131
| false
| 0.014019
| 0.028037
| 0
| 0.154206
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9be1b65c5bd6a3c43adb1418cc3c3331f565b50a
| 188
|
py
|
Python
|
masz/exceptions/__init__.py
|
zaanposni/masz-api-wrapper
|
36bd2083f4641c010e0bdbde2029905af6c69088
|
[
"MIT"
] | 2
|
2021-08-09T08:36:04.000Z
|
2021-12-18T03:23:11.000Z
|
masz/exceptions/__init__.py
|
zaanposni/masz-api-wrapper
|
36bd2083f4641c010e0bdbde2029905af6c69088
|
[
"MIT"
] | 13
|
2021-07-18T18:24:00.000Z
|
2021-07-27T15:11:52.000Z
|
masz/exceptions/__init__.py
|
zaanposni/masz-api-wrapper
|
36bd2083f4641c010e0bdbde2029905af6c69088
|
[
"MIT"
] | null | null | null |
from .login_failure import MASZLoginFailure
from .request_failure import MASZRequestFailure
from .base_exception import MASZBaseException
from .invalid_response import MASZInvalidResponse
| 37.6
| 49
| 0.893617
| 20
| 188
| 8.2
| 0.65
| 0.158537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 188
| 4
| 50
| 47
| 0.953488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
502a50117dc93a4a3782dffb6b03adf0cbceb88c
| 13,240
|
py
|
Python
|
swagger_client/models/__init__.py
|
idaholab/Deep-Lynx-Python-Package
|
99927cc877eba8e2ee396feec807da1c48c64893
|
[
"MIT"
] | 3
|
2021-06-16T20:34:41.000Z
|
2021-06-16T23:54:36.000Z
|
swagger_client/models/__init__.py
|
idaholab/Deep-Lynx-Python-Package
|
99927cc877eba8e2ee396feec807da1c48c64893
|
[
"MIT"
] | null | null | null |
swagger_client/models/__init__.py
|
idaholab/Deep-Lynx-Python-Package
|
99927cc877eba8e2ee396feec807da1c48c64893
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# flake8: noqa
"""
Deep Lynx
The construction of megaprojects has consistently demonstrated challenges for project managers in regard to meeting cost, schedule, and performance requirements. Megaproject construction challenges are common place within megaprojects with many active projects in the United States failing to meet cost and schedule efforts by significant margins. Currently, engineering teams operate in siloed tools and disparate teams where connections across design, procurement, and construction systems are translated manually or over brittle point-to-point integrations. The manual nature of data exchange increases the risk of silent errors in the reactor design, with each silent error cascading across the design. These cascading errors lead to uncontrollable risk during construction, resulting in significant delays and cost overruns. Deep Lynx allows for an integrated platform during design and operations of mega projects. The Deep Lynx Core API delivers a few main features. 1. Provides a set of methods and endpoints for manipulating data in an object oriented database. This allows us to store complex datatypes as records and then to compile them into actual, modifiable objects at run-time. Users can store taxonomies or ontologies in a readable format. 2. Provides methods for storing and retrieving data in a graph database. This data is structured and validated against the aformentioned object oriented database before storage. # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from swagger_client.models.add_data_to_import_response import AddDataToImportResponse
from swagger_client.models.assign_role_request import AssignRoleRequest
from swagger_client.models.batch_container_update_request import BatchContainerUpdateRequest
from swagger_client.models.batch_update_container_response import BatchUpdateContainerResponse
from swagger_client.models.challenge import Challenge
from swagger_client.models.challenge_methods import ChallengeMethods
from swagger_client.models.container import Container
from swagger_client.models.container_config import ContainerConfig
from swagger_client.models.container_import_request import ContainerImportRequest
from swagger_client.models.container_import_response import ContainerImportResponse
from swagger_client.models.container_import_update_response import ContainerImportUpdateResponse
from swagger_client.models.container_invite import ContainerInvite
from swagger_client.models.containers_datasources_imports_request import ContainersDatasourcesImportsRequest
from swagger_client.models.containers_import_body import ContainersImportBody
from swagger_client.models.containers_query_response import ContainersQueryResponse
from swagger_client.models.context import Context
from swagger_client.models.create_container_request import CreateContainerRequest
from swagger_client.models.create_container_response import CreateContainerResponse
from swagger_client.models.create_data_source_config import CreateDataSourceConfig
from swagger_client.models.create_data_source_request import CreateDataSourceRequest
from swagger_client.models.create_data_sources_response import CreateDataSourcesResponse
from swagger_client.models.create_event_response import CreateEventResponse
from swagger_client.models.create_import_response import CreateImportResponse
from swagger_client.models.create_manual_import import CreateManualImport
from swagger_client.models.create_manual_import_response import CreateManualImportResponse
from swagger_client.models.create_metatype_keys_response import CreateMetatypeKeysResponse
from swagger_client.models.create_metatype_relationship_keys_response import CreateMetatypeRelationshipKeysResponse
from swagger_client.models.create_metatype_relationship_pairs_response import CreateMetatypeRelationshipPairsResponse
from swagger_client.models.create_metatype_relationships_response import CreateMetatypeRelationshipsResponse
from swagger_client.models.create_metatypes_response import CreateMetatypesResponse
from swagger_client.models.create_or_update_edges_request import CreateOrUpdateEdgesRequest
from swagger_client.models.create_or_update_nodes_request import CreateOrUpdateNodesRequest
from swagger_client.models.create_registered_event_request import CreateRegisteredEventRequest
from swagger_client.models.create_transformation_response import CreateTransformationResponse
from swagger_client.models.create_type_mapping_transformations_request import CreateTypeMappingTransformationsRequest
from swagger_client.models.credential_validation_result import CredentialValidationResult
from swagger_client.models.data_export_config import DataExportConfig
from swagger_client.models.data_source import DataSource
from swagger_client.models.data_source_config import DataSourceConfig
from swagger_client.models.data_source_id_files_body import DataSourceIdFilesBody
from swagger_client.models.data_source_import import DataSourceImport
from swagger_client.models.data_staging import DataStaging
from swagger_client.models.edge import Edge
from swagger_client.models.error_model import ErrorModel
from swagger_client.models.error_response import ErrorResponse
from swagger_client.models.event import Event
from swagger_client.models.exporter import Exporter
from swagger_client.models.exporter_config import ExporterConfig
from swagger_client.models.file_info import FileInfo
from swagger_client.models.file_model import FileModel
from swagger_client.models.generic200_response import Generic200Response
from swagger_client.models.get_container_response import GetContainerResponse
from swagger_client.models.get_data_export_response import GetDataExportResponse
from swagger_client.models.get_data_source_response import GetDataSourceResponse
from swagger_client.models.get_data_type_mapping_response import GetDataTypeMappingResponse
from swagger_client.models.get_edge_response import GetEdgeResponse
from swagger_client.models.get_event_response import GetEventResponse
from swagger_client.models.get_file_info_response import GetFileInfoResponse
from swagger_client.models.get_import_data_response import GetImportDataResponse
from swagger_client.models.get_metatype_key_response import GetMetatypeKeyResponse
from swagger_client.models.get_metatype_relationship_key_response import GetMetatypeRelationshipKeyResponse
from swagger_client.models.get_metatype_relationship_pair_response import GetMetatypeRelationshipPairResponse
from swagger_client.models.get_metatype_relationship_response import GetMetatypeRelationshipResponse
from swagger_client.models.get_metatype_response import GetMetatypeResponse
from swagger_client.models.get_node_response import GetNodeResponse
from swagger_client.models.get_user_response import GetUserResponse
from swagger_client.models.import_container_id_body import ImportContainerIdBody
from swagger_client.models.import_id_data_body import ImportIdDataBody
from swagger_client.models.import_model import ImportModel
from swagger_client.models.inline_response200 import InlineResponse200
from swagger_client.models.key_validation import KeyValidation
from swagger_client.models.list_container_invites_response import ListContainerInvitesResponse
from swagger_client.models.list_container_response import ListContainerResponse
from swagger_client.models.list_data_exports_response import ListDataExportsResponse
from swagger_client.models.list_data_source_imports_response import ListDataSourceImportsResponse
from swagger_client.models.list_data_sources_response import ListDataSourcesResponse
from swagger_client.models.list_data_type_mapping_response import ListDataTypeMappingResponse
from swagger_client.models.list_edge_files import ListEdgeFiles
from swagger_client.models.list_edges_response import ListEdgesResponse
from swagger_client.models.list_events_response import ListEventsResponse
from swagger_client.models.list_import_data_response import ListImportDataResponse
from swagger_client.models.list_metatype_keys_response import ListMetatypeKeysResponse
from swagger_client.models.list_metatype_relationship_keys_response import ListMetatypeRelationshipKeysResponse
from swagger_client.models.list_metatype_relationship_pairs_response import ListMetatypeRelationshipPairsResponse
from swagger_client.models.list_metatype_relationships_response import ListMetatypeRelationshipsResponse
from swagger_client.models.list_metatypes_response import ListMetatypesResponse
from swagger_client.models.list_node_files import ListNodeFiles
from swagger_client.models.list_nodes_response import ListNodesResponse
from swagger_client.models.list_transformation_response import ListTransformationResponse
from swagger_client.models.list_user_invites_response import ListUserInvitesResponse
from swagger_client.models.list_user_permissions_response import ListUserPermissionsResponse
from swagger_client.models.list_user_roles import ListUserRoles
from swagger_client.models.list_users_for_container_response import ListUsersForContainerResponse
from swagger_client.models.list_users_response import ListUsersResponse
from swagger_client.models.mappings_import_body import MappingsImportBody
from swagger_client.models.metatype import Metatype
from swagger_client.models.metatype_key import MetatypeKey
from swagger_client.models.metatype_relationship import MetatypeRelationship
from swagger_client.models.new_data_export_request import NewDataExportRequest
from swagger_client.models.new_metatype_key_request import NewMetatypeKeyRequest
from swagger_client.models.new_metatype_relationship_key_request import NewMetatypeRelationshipKeyRequest
from swagger_client.models.new_metatype_relationship_pair_request import NewMetatypeRelationshipPairRequest
from swagger_client.models.new_metatype_relationship_request import NewMetatypeRelationshipRequest
from swagger_client.models.new_metatype_request import NewMetatypeRequest
from swagger_client.models.node import Node
from swagger_client.models.node_metatype_body import NodeMetatypeBody
from swagger_client.models.not_found404 import NotFound404
from swagger_client.models.one_ofinline_response200 import OneOfinlineResponse200
from swagger_client.models.prompt import Prompt
from swagger_client.models.rsa_cancel_request import RSACancelRequest
from swagger_client.models.rsa_init_request import RSAInitRequest
from swagger_client.models.rsa_response import RSAResponse
from swagger_client.models.rsa_status_request import RSAStatusRequest
from swagger_client.models.rsa_status_response import RSAStatusResponse
from swagger_client.models.rsa_verify_request import RSAVerifyRequest
from swagger_client.models.relationship_key import RelationshipKey
from swagger_client.models.relationship_pair import RelationshipPair
from swagger_client.models.required_method import RequiredMethod
from swagger_client.models.token_exchange_request import TokenExchangeRequest
from swagger_client.models.transformation import Transformation
from swagger_client.models.transformation_condition import TransformationCondition
from swagger_client.models.transformation_key import TransformationKey
from swagger_client.models.type_mapping import TypeMapping
from swagger_client.models.type_mapping_export_payload import TypeMappingExportPayload
from swagger_client.models.update_container_request import UpdateContainerRequest
from swagger_client.models.update_container_response import UpdateContainerResponse
from swagger_client.models.update_data_source_response import UpdateDataSourceResponse
from swagger_client.models.update_data_type_mapping_response import UpdateDataTypeMappingResponse
from swagger_client.models.update_import_data_response import UpdateImportDataResponse
from swagger_client.models.update_metatype_key_request import UpdateMetatypeKeyRequest
from swagger_client.models.update_metatype_key_response import UpdateMetatypeKeyResponse
from swagger_client.models.update_metatype_relationship_key_response import UpdateMetatypeRelationshipKeyResponse
from swagger_client.models.update_metatype_relationship_pair_response import UpdateMetatypeRelationshipPairResponse
from swagger_client.models.update_metatype_relationship_request import UpdateMetatypeRelationshipRequest
from swagger_client.models.update_metatype_relationship_response import UpdateMetatypeRelationshipResponse
from swagger_client.models.update_metatype_request import UpdateMetatypeRequest
from swagger_client.models.update_metatype_response import UpdateMetatypeResponse
from swagger_client.models.update_registered_event_request import UpdateRegisteredEventRequest
from swagger_client.models.update_transformation_response import UpdateTransformationResponse
from swagger_client.models.upload_file_response import UploadFileResponse
from swagger_client.models.user import User
from swagger_client.models.user_key import UserKey
from swagger_client.models.validate_metatype_properties_response import ValidateMetatypePropertiesResponse
from swagger_client.models.validation import Validation
from swagger_client.models.value import Value
from swagger_client.models.version import Version
| 81.226994
| 1,455
| 0.909743
| 1,585
| 13,240
| 7.30347
| 0.234069
| 0.138735
| 0.214409
| 0.290083
| 0.383552
| 0.21199
| 0.091223
| 0
| 0
| 0
| 0
| 0.002664
| 0.06435
| 13,240
| 162
| 1,456
| 81.728395
| 0.931789
| 0.122054
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
acbe2132ffd43cbecc4978bde45db0aa9a56e88c
| 156
|
py
|
Python
|
dataloader/__init__.py
|
ColinWine/Multi-modal-Multi-label-Facial-Action-Unit-Detection-with-Transformer
|
93871bed9078d5bf6b4bb37407c9dce87c569b55
|
[
"MIT"
] | null | null | null |
dataloader/__init__.py
|
ColinWine/Multi-modal-Multi-label-Facial-Action-Unit-Detection-with-Transformer
|
93871bed9078d5bf6b4bb37407c9dce87c569b55
|
[
"MIT"
] | null | null | null |
dataloader/__init__.py
|
ColinWine/Multi-modal-Multi-label-Facial-Action-Unit-Detection-with-Transformer
|
93871bed9078d5bf6b4bb37407c9dce87c569b55
|
[
"MIT"
] | null | null | null |
from .aff2compdataset import Aff2CompDataset
from .testset import Aff2TestDataset
from .utils import SubsetSequentialSampler, Prefetcher,SubsetRandomSampler
| 52
| 74
| 0.891026
| 14
| 156
| 9.928571
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020833
| 0.076923
| 156
| 3
| 74
| 52
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
acc6e7ec5684a204f60716937f61ce89db7acd41
| 90,723
|
py
|
Python
|
custom/icds_reports/tests/agg_tests/reports/test_awc_reports.py
|
roboton/commcare-hq
|
3ccbe59508d98dd1963ca87cf249dd2df8af8ecc
|
[
"BSD-3-Clause"
] | null | null | null |
custom/icds_reports/tests/agg_tests/reports/test_awc_reports.py
|
roboton/commcare-hq
|
3ccbe59508d98dd1963ca87cf249dd2df8af8ecc
|
[
"BSD-3-Clause"
] | null | null | null |
custom/icds_reports/tests/agg_tests/reports/test_awc_reports.py
|
roboton/commcare-hq
|
3ccbe59508d98dd1963ca87cf249dd2df8af8ecc
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import datetime
from datetime import date
from django.core.serializers.json import DjangoJSONEncoder
from django.test import TestCase
from mock import mock
from custom.icds_reports.const import AADHAR_SEEDED_BENEFICIARIES, CHILDREN_ENROLLED_FOR_ANGANWADI_SERVICES, \
PREGNANT_WOMEN_ENROLLED_FOR_ANGANWADI_SERVICES, LACTATING_WOMEN_ENROLLED_FOR_ANGANWADI_SERVICES, \
OUT_OF_SCHOOL_ADOLESCENT_GIRLS_11_14_YEARS
from custom.icds_reports.reports.awc_reports import get_beneficiary_details, get_awc_reports_system_usage, \
get_awc_reports_pse, get_awc_reports_maternal_child, get_awc_report_demographics, \
get_awc_report_beneficiary, get_awc_report_pregnant, get_pregnant_details, get_awc_report_lactating
from custom.icds_reports.messages import new_born_with_low_weight_help_text, wasting_help_text, \
exclusive_breastfeeding_help_text, early_initiation_breastfeeding_help_text, \
children_initiated_appropriate_complementary_feeding_help_text, institutional_deliveries_help_text, \
percent_aadhaar_seeded_beneficiaries_help_text, percent_children_enrolled_help_text, \
percent_pregnant_women_enrolled_help_text, percent_lactating_women_enrolled_help_text, \
percent_adolescent_girls_enrolled_help_text_v2
class FirstDayOfMay(date):
    """``date`` stand-in whose ``today()`` is frozen to 2017-05-01.

    Used with ``mock`` to make report code that asks for "today"
    deterministic in tests.
    """
    @classmethod
    def today(cls):
        # Always the first day of May 2017, regardless of the real clock.
        return date(year=2017, month=5, day=1)
class FirstDayOfMayDate(date):
    """``date`` stand-in with ``today()`` pinned to 2017-05-01.

    NOTE(review): behaviorally identical to ``FirstDayOfMay``; kept as a
    separate name so existing patch targets keep working.
    """
    @classmethod
    def today(cls):
        # Frozen clock: 1 May 2017.
        return date(year=2017, month=5, day=1)
class SecondDayOfMay(date):
    """``date`` stand-in whose ``today()`` is frozen to 2017-05-02."""
    @classmethod
    def today(cls):
        # Frozen clock: 2 May 2017.
        return date(year=2017, month=5, day=2)
class TestAWCReport(TestCase):
def test_beneficiary_details_recorded_weight_none(self):
data = get_beneficiary_details(
case_id='6b234c5b-883c-4849-9dfd-b1571af8717b',
awc_id='a50',
selected_month=(2017, 6, 1)
)
self.assertEqual(data['age_in_months'], 69)
self.assertEqual(data['sex'], 'M')
self.assertEqual(data['person_name'], 'Name 3342')
self.assertEqual(data['mother_name'], 'संगीता')
def test_beneficiary_details_recorded_weight_is_not_none(self):
data = get_beneficiary_details(
case_id='8e226cc6-740f-4146-b017-69d9f6e9651b',
awc_id='a21',
selected_month=(2017, 6, 1)
)
self.assertEqual(data['age_in_months'], 54)
self.assertEqual(data['sex'], 'M')
self.assertEqual(data['person_name'], 'Name 3141')
self.assertEqual(data['mother_name'], 'शियामु बाई')
self.assertEqual(next(filter(lambda r: r['x'] == 53, data['weight']))['y'], 12.6)
self.assertEqual(next(filter(lambda r: r['x'] == 53, data['height']))['y'], 96.0)
self.assertEqual(next(filter(lambda r: r['x'] == 96.0, data['wfl']))['y'], 12.6)
def test_beneficiary_details_have_age_in_month_not_have_recorded_height(self):
data = get_beneficiary_details(
case_id='411c4234-8475-415a-9c28-911b85868aa5',
awc_id='a15',
selected_month=(2017, 6, 1)
)
self.assertEqual(data['age_in_months'], 37)
self.assertEqual(data['sex'], 'F')
self.assertEqual(data['person_name'], 'Name 3483')
self.assertEqual(data['mother_name'], 'रींकीकुँवर')
def test_beneficiary_details_status_active(self):
data = get_beneficiary_details(
case_id='411c4234-8475-415a-9c28-911b85868aa5',
awc_id='a15',
selected_month=(2017, 6, 1)
)
self.assertEqual(data['beneficiary_status'], 'Active')
def test_beneficiary_details_status_migrated(self):
data = get_beneficiary_details(
case_id='625adb33-c67e-4151-93c7-64f28c988388',
awc_id='a7',
selected_month=(2017, 5, 1)
)
self.assertEqual(data['age_in_months'], 47)
self.assertEqual(data['sex'], 'M')
self.assertEqual(data['person_name'], 'Name 1783')
self.assertEqual(data['mother_name'], 'रेरवा')
self.assertEqual(data['beneficiary_status'], 'Migrated')
def test_awc_reports_system_usage_AWC_days_open(self):
self.assertDictEqual(
get_awc_reports_system_usage(
'icds-cas',
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
(2017, 4, 1),
(2017, 3, 1),
'aggregation_level'
)['kpi'][0][0],
{
"all": "",
"format": "number",
"percent": 100.0,
"value": 18,
"label": "AWC Days Open",
"frequency": "month",
"help_text": "The total number of days the AWC is open in the given month. "
"The AWC is expected to be open 6 days a week"
" (Not on Sundays and public holidays)"
}
)
def test_awc_reports_system_usage_percentage_of_eligible_children_ICDS_beneficiaries_between_0_6_years(self):
self.assertDictEqual(
get_awc_reports_system_usage(
'icds-cas',
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
(2017, 4, 1),
(2017, 3, 1),
'aggregation_level'
)['kpi'][0][1],
{
"all": 0,
"format": "percent_and_div",
"percent": "Data in the previous reporting period was 0",
"value": 0,
"label": "Percentage of eligible children (ICDS beneficiaries between 0-6 years)"
" who have been weighed in the current month",
"frequency": "month",
"help_text": "Percentage of AWCs with a functional toilet"
}
)
def test_awc_reports_system_usage_kpi_length(self):
self.assertEqual(
len(get_awc_reports_system_usage(
'icds-cas',
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
(2017, 4, 1),
(2017, 3, 1),
'aggregation_level'
)['kpi']),
1
)
def test_awc_reports_system_usage_kpi_total_length(self):
data = get_awc_reports_system_usage(
'icds-cas',
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
(2017, 4, 1),
(2017, 3, 1),
'aggregation_level'
)['kpi']
self.assertEqual(
sum([len(record_row) for record_row in data]),
2
)
def test_awc_reports_system_usage_AWC_days_open_per_week_chart(self):
self.assertEqual(
get_awc_reports_system_usage(
'icds-cas',
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
(2017, 4, 1),
(2017, 3, 1),
'aggregation_level'
)['charts'][0],
[
{
"classed": "dashed",
"values": [
[
1491523200000,
1
],
[
1491609600000,
1
],
[
1491782400000,
1
],
[
1491955200000,
1
],
[
1492473600000,
1
],
[
1492732800000,
1
],
[
1492992000000,
1
],
[
1493078400000,
1
],
[
1493251200000,
1
]
],
"key": "AWC Days Open Per Week"
}
]
)
def test_awc_reports_system_usage_PSE_average_weekly_attendance(self):
self.assertEqual(
get_awc_reports_system_usage(
'icds-cas',
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
(2017, 4, 1),
(2017, 3, 1),
'aggregation_level'
)['charts'][1],
[
{
"classed": "dashed",
"values": [
[
1491523200000,
0.65625
],
[
1491609600000,
0.64516129
],
[
1491782400000,
0.677419355
],
[
1491955200000,
0.612903226
],
[
1492473600000,
0.612903226
],
[
1492732800000,
0.64516129
],
[
1492992000000,
0.64516129
],
[
1493078400000,
0.64516129
],
[
1493251200000,
0.64516129
]
],
"key": "PSE- Average Weekly Attendance"
}
]
)
def test_awc_reports_system_usage_length(self):
self.assertEqual(
len(get_awc_reports_system_usage(
'icds-cas',
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
(2017, 4, 1),
(2017, 3, 1),
'aggregation_level'
)['charts']),
2
)
def test_awc_reports_system_usage_keys(self):
self.assertEqual(
list(get_awc_reports_system_usage(
'icds-cas',
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
(2017, 4, 1),
(2017, 3, 1),
'aggregation_level'
).keys()),
['kpi', 'charts']
)
def test_awc_reports_pse_images_0(self):
data = get_awc_reports_pse(
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
'icds-cas'
)
for kpi in data['kpi']:
for el in kpi:
del el['help_text']
self.assertEqual(
data['images'][0],
[
{
"date": "01/05/2017",
"image": None,
"id": 0
},
{
"date": "02/05/2017",
"image": "http://localhost:8000/a/icds-cas/icds_dashboard/icds_image_accessor/"
"00a368e6-e88f-41ee-96aa-25a8ec5ab3d6/1493703284010.jpg",
"id": 1
},
{
"date": "03/05/2017",
"image": "http://localhost:8000/a/icds-cas/icds_dashboard/icds_image_accessor/"
"ef336dda-12a1-42a4-9bee-405d17c2aba8/1493790538044.jpg",
"id": 2
},
{
"date": "04/05/2017",
"image": "http://localhost:8000/a/icds-cas/icds_dashboard/icds_image_accessor/"
"00ec149e-c1a9-4083-a73c-cdc39df17137/1493876634200.jpg",
"id": 3
}
]
)
def test_awc_reports_pse_images_1(self):
data = get_awc_reports_pse(
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
'icds-cas'
)
for kpi in data['kpi']:
for el in kpi:
del el['help_text']
self.assertEqual(
data['images'][1],
[
{
"date": "05/05/2017",
"image": "http://localhost:8000/a/icds-cas/icds_dashboard/icds_image_accessor/"
"ebb1f3c8-34c7-4ed1-9f35-0b209cb4d683/1493959451474.jpg",
"id": 4
},
{
"date": "06/05/2017",
"image": None,
"id": 5
},
{
"date": "07/05/2017",
"image": None,
"id": 6
},
{
"date": "08/05/2017",
"image": None,
"id": 7
}
]
)
def test_awc_reports_pse_images_2(self):
data = get_awc_reports_pse(
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
'icds-cas'
)
for kpi in data['kpi']:
for el in kpi:
del el['help_text']
self.assertEqual(
data['images'][2],
[
{
"date": "09/05/2017",
"image": "http://localhost:8000/a/icds-cas/icds_dashboard/icds_image_accessor/"
"eb20b019-97ef-45e0-9698-fda3d964a096/1494308187855.jpg",
"id": 8
},
{
"date": "10/05/2017",
"image": None,
"id": 9
},
{
"date": "11/05/2017",
"image": None,
"id": 10
},
{
"date": "12/05/2017",
"image": None,
"id": 11
}
]
)
def test_awc_reports_pse_images_3(self):
data = get_awc_reports_pse(
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
'icds-cas'
)
for kpi in data['kpi']:
for el in kpi:
del el['help_text']
self.assertEqual(
data['images'][3],
[
{
"date": "13/05/2017",
"image": None,
"id": 12
},
{
"date": "14/05/2017",
"image": None,
"id": 13
},
{
"date": "15/05/2017",
"image": "http://localhost:8000/a/icds-cas/icds_dashboard/icds_image_accessor/"
"036ab123-0a1e-43b6-8e7d-4bcf9abcdfa2/1494826363729.jpg",
"id": 14
},
{
"date": "16/05/2017",
"image": "http://localhost:8000/a/icds-cas/icds_dashboard/icds_image_accessor/"
"dda9c427-4ba7-4f90-9c5b-d2a02cff9e31/1494911839185.jpg",
"id": 15
}
]
)
def test_awc_reports_pse_images_4(self):
data = get_awc_reports_pse(
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
'icds-cas'
)
for kpi in data['kpi']:
for el in kpi:
del el['help_text']
self.assertEqual(
data['images'][4],
[
{
"date": "17/05/2017",
"image": "http://localhost:8000/a/icds-cas/icds_dashboard/icds_image_accessor/"
"1be8a49b-c63c-4288-bcb2-9e5bf132834f/1494997946602.jpg",
"id": 16
},
{
"date": "18/05/2017",
"image": "http://localhost:8000/a/icds-cas/icds_dashboard/icds_image_accessor/"
"c7f6d174-1218-4f8e-ab84-f80e17b1ebdb/1495084707730.jpg",
"id": 17
},
{
"date": "19/05/2017",
"image": "http://localhost:8000/a/icds-cas/icds_dashboard/icds_image_accessor/"
"416990d9-f354-457f-8c52-1866e98840f5/1495173038810.jpg",
"id": 18
},
{
"date": "20/05/2017",
"image": "http://localhost:8000/a/icds-cas/icds_dashboard/icds_image_accessor/"
"3fea99f8-c6f4-48c9-9386-152639fe1b17/1495259635314.jpg",
"id": 19
}
]
)
def test_awc_reports_pse_images_5(self):
data = get_awc_reports_pse(
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
'icds-cas'
)
for kpi in data['kpi']:
for el in kpi:
del el['help_text']
self.assertEqual(
data['images'][5],
[
{
"date": "21/05/2017",
"image": None,
"id": 20
},
{
"date": "22/05/2017",
"image": "http://localhost:8000/a/icds-cas/icds_dashboard/icds_image_accessor/"
"ce528857-f34e-4785-913f-41d221fbeed8/1495432106324.jpg",
"id": 21
},
{
"date": "23/05/2017",
"image": None,
"id": 22
},
{
"date": "24/05/2017",
"image": "http://localhost:8000/a/icds-cas/icds_dashboard/icds_image_accessor/"
"5d0f2aa4-6d5b-424f-91d1-c4afb2d0555b/1495605536823.jpg",
"id": 23
}
]
)
def test_awc_reports_pse_images_6(self):
data = get_awc_reports_pse(
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
'icds-cas'
)
for kpi in data['kpi']:
for el in kpi:
del el['help_text']
self.assertEqual(
data['images'][6],
[
{
"date": "25/05/2017",
"image": "http://localhost:8000/a/icds-cas/icds_dashboard/icds_image_accessor/"
"20e4d641-a85a-4927-96ab-994fa46a8ea0/1495690578649.jpg",
"id": 24
},
{
"date": "26/05/2017",
"image": "http://localhost:8000/a/icds-cas/icds_dashboard/icds_image_accessor/"
"f86e701b-1531-469f-8996-705e297bf498/1495776461721.jpg",
"id": 25
},
{
"date": "27/05/2017",
"image": "http://localhost:8000/a/icds-cas/icds_dashboard/icds_image_accessor/"
"6701b39d-4b6f-4ae3-8a88-eadb61b1a105/1495865744995.jpg",
"id": 26
},
{
"date": "28/05/2017",
"image": None,
"id": 27
}
]
)
def test_awc_reports_pse_images_7(self):
data = get_awc_reports_pse(
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
'icds-cas'
)
for kpi in data['kpi']:
for el in kpi:
del el['help_text']
self.assertEqual(
data['images'][7],
[
{
"date": "29/05/2017",
"image": "http://localhost:8000/a/icds-cas/icds_dashboard/icds_image_accessor/"
"6376d77d-bb2a-48ac-9042-7892dda97bba/1496036503892.jpg",
"id": 28
},
{
"date": "30/05/2017",
"image": "http://localhost:8000/a/icds-cas/icds_dashboard/icds_image_accessor/"
"c0d002ca-f7b0-4bd2-a531-881b46610c2f/1496120210768.jpg",
"id": 29
},
{
"date": "31/05/2017",
"image": None,
"id": 30
}
]
)
def test_awc_reports_pse_images_length(self):
data = get_awc_reports_pse(
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
'icds-cas'
)
for kpi in data['kpi']:
for el in kpi:
del el['help_text']
self.assertEqual(
len(data['images']),
8
)
def test_awc_reports_pse_kpi(self):
data = get_awc_reports_pse(
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
'icds-cas'
)
for kpi in data['kpi']:
for el in kpi:
del el['help_text']
self.assertEqual(
data['kpi'],
[
[
{
"color": "green",
"all": "",
"frequency": "month",
"format": "number",
"percent": 100.0,
"value": 18,
"label": "AWC Days Open"
}
]
]
)
def test_awc_reports_pse_charts_0(self):
data = get_awc_reports_pse(
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
'icds-cas'
)
for kpi in data['kpi']:
for el in kpi:
del el['help_text']
self.assertEqual(
data['charts'][0],
[
{
"color": "#006fdf",
"classed": "dashed",
"strokeWidth": 2,
"values": [
{
"y": 4,
"x": 1493596800000
},
{
"y": 1,
"x": 1494201600000
},
{
"y": 6,
"x": 1494806400000
},
{
"y": 5,
"x": 1495411200000
},
{
"y": 2,
"x": 1496016000000
}
],
"key": "AWC Days Open per week"
}
]
)
def test_awc_reports_pse_charts_1(self):
data = get_awc_reports_pse(
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
'icds-cas'
)
for kpi in data['kpi']:
for el in kpi:
del el['help_text']
self.assertEqual(
data['charts'][1],
[
{
"color": "#006fdf",
"classed": "dashed",
"strokeWidth": 2,
"values": [
{
"y": 0,
"x": 1493596800000,
"attended": 0,
"eligible": 0
},
{
"y": 0.741935484,
"x": 1493683200000,
"attended": 23,
"eligible": 31
},
{
"y": 0.806451613,
"x": 1493769600000,
"attended": 25,
"eligible": 31
},
{
"y": 0.8,
"x": 1493856000000,
"attended": 24,
"eligible": 30
},
{
"y": 0.8,
"x": 1493942400000,
"attended": 24,
"eligible": 30
},
{
"y": 0,
"x": 1494028800000,
"attended": 0,
"eligible": 0
},
{
"y": 0,
"x": 1494115200000,
"attended": 0,
"eligible": 0
},
{
"y": 0,
"x": 1494201600000,
"attended": 0,
"eligible": 0
},
{
"y": 0.8,
"x": 1494288000000,
"attended": 24,
"eligible": 30
},
{
"y": 0,
"x": 1494374400000,
"attended": 0,
"eligible": 0
},
{
"y": 0,
"x": 1494460800000,
"attended": 0,
"eligible": 0
},
{
"y": 0,
"x": 1494547200000,
"attended": 0,
"eligible": 0
},
{
"y": 0,
"x": 1494633600000,
"attended": 0,
"eligible": 0
},
{
"y": 0,
"x": 1494720000000,
"attended": 0,
"eligible": 0
},
{
"y": 1.0,
"x": 1494806400000,
"attended": 30,
"eligible": 30
},
{
"y": 0.666666667,
"x": 1494892800000,
"attended": 20,
"eligible": 30
},
{
"y": 0.733333333,
"x": 1494979200000,
"attended": 22,
"eligible": 30
},
{
"y": 0.766666667,
"x": 1495065600000,
"attended": 23,
"eligible": 30
},
{
"y": 0.666666667,
"x": 1495152000000,
"attended": 20,
"eligible": 30
},
{
"y": 0.633333333,
"x": 1495238400000,
"attended": 19,
"eligible": 30
},
{
"y": 0,
"x": 1495324800000,
"attended": 0,
"eligible": 0
},
{
"y": 0.666666667,
"x": 1495411200000,
"attended": 20,
"eligible": 30
},
{
"y": 0,
"x": 1495497600000,
"attended": 0,
"eligible": 0
},
{
"y": 0.666666667,
"x": 1495584000000,
"attended": 20,
"eligible": 30
},
{
"y": 0.666666667,
"x": 1495670400000,
"attended": 20,
"eligible": 30
},
{
"y": 0.666666667,
"x": 1495756800000,
"attended": 20,
"eligible": 30
},
{
"y": 0.666666667,
"x": 1495843200000,
"attended": 20,
"eligible": 30
},
{
"y": 0,
"x": 1495929600000,
"attended": 0,
"eligible": 0
},
{
"y": 0.655172414,
"x": 1496016000000,
"attended": 19,
"eligible": 29
},
{
"y": 1.0,
"x": 1496102400000,
"attended": 29,
"eligible": 29
},
{
"y": 0,
"x": 1496188800000,
"attended": 0,
"eligible": 0
}
],
"key": "PSE - Daily Attendance"
}
]
)
def test_awc_reports_pse_charts_length(self):
data = get_awc_reports_pse(
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
'icds-cas'
)
for kpi in data['kpi']:
for el in kpi:
del el['help_text']
self.assertEqual(
len(data['charts']),
2
)
def test_awc_reports_pse_map(self):
data = get_awc_reports_pse(
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
'icds-cas'
)
for kpi in data['kpi']:
for el in kpi:
del el['help_text']
self.assertDictEqual(
data['map'],
{
"markers": {}
}
)
def test_awc_reports_pse_keys(self):
data = get_awc_reports_pse(
{
'state_id': 'st1',
'district_id': 'd1',
'block_id': 'b1',
'awc_id': 'a1',
'aggregation_level': 5
},
(2017, 5, 1),
'icds-cas'
)
for kpi in data['kpi']:
for el in kpi:
del el['help_text']
self.assertItemsEqual(
data,
["images", "kpi", "charts", "map"]
)
def test_awc_reports_maternal_child_underweight_weight_for_age(self):
    """Underweight (weight-for-age) KPI card at [0][0] for AWC a1, May 2017."""
    location = {
        'state_id': 'st1',
        'district_id': 'd1',
        'block_id': 'b1',
        'awc_id': 'a1',
        'aggregation_level': 5
    }
    expected = {
        "color": "red",
        "all": 0,
        "frequency": "month",
        "format": "percent_and_div",
        "percent": "Data in the previous reporting period was 0",
        "value": 0,
        "label": "Underweight (Weight-for-Age)",
        "help_text": (
            "Of the total children weighed, the percentage of children between 0-5 years who were "
            "moderately/severely underweight in the current month. Children who are moderately or "
            "severely underweight have a higher risk of mortality. "
        )
    }
    report = get_awc_reports_maternal_child(
        'icds-cas', location, (2017, 5, 1), (2017, 4, 1))
    self.assertDictEqual(expected, report['kpi'][0][0])
def test_awc_reports_maternal_child_wasting_weight_for_height(self):
    """Wasting (weight-for-height) KPI card at [0][1] for AWC a1, May 2017."""
    location = {
        'state_id': 'st1',
        'district_id': 'd1',
        'block_id': 'b1',
        'awc_id': 'a1',
        'aggregation_level': 5
    }
    expected = {
        "color": "red",
        "all": 0,
        "frequency": "month",
        "format": "percent_and_div",
        "percent": "Data in the previous reporting period was 0",
        "value": 0,
        "label": "Wasting (Weight-for-Height)",
        "help_text": wasting_help_text("0 - 5 years")
    }
    report = get_awc_reports_maternal_child(
        'icds-cas', location, (2017, 5, 1), (2017, 4, 1))
    self.assertDictEqual(expected, report['kpi'][0][1])
def test_awc_reports_maternal_child_stunting_height_for_age(self):
    """Stunting (height-for-age) KPI card at [1][0] for AWC a1, May 2017."""
    location = {
        'state_id': 'st1',
        'district_id': 'd1',
        'block_id': 'b1',
        'awc_id': 'a1',
        'aggregation_level': 5
    }
    expected = {
        "color": "red",
        "all": 0,
        "frequency": "month",
        "format": "percent_and_div",
        "percent": "Data in the previous reporting period was 0",
        "value": 0,
        "label": "Stunting (Height-for-Age)",
        "help_text": (
            "Of the children whose height was measured, the percentage of children between "
            "0 - 5 years who were moderately/severely stunted in the current month."
            "<br/><br/>"
            "Stunting is a sign of chronic undernutrition and has long lasting harmful consequences "
            "on the growth of a child"
        )
    }
    report = get_awc_reports_maternal_child(
        'icds-cas', location, (2017, 5, 1), (2017, 4, 1))
    self.assertDictEqual(expected, report['kpi'][1][0])
def test_awc_reports_maternal_child_wasting_weight_for_height_icds_features_flag(self):
    """Wasting KPI card at [0][1] with the ICDS feature flag enabled."""
    location = {
        'state_id': 'st1',
        'district_id': 'd1',
        'block_id': 'b1',
        'awc_id': 'a1',
        'aggregation_level': 5
    }
    expected = {
        "color": "red",
        "all": 0,
        "frequency": "month",
        "format": "percent_and_div",
        "percent": "Data in the previous reporting period was 0",
        "value": 0,
        "label": "Wasting (Weight-for-Height)",
        "help_text": wasting_help_text("0 - 5 years")
    }
    report = get_awc_reports_maternal_child(
        'icds-cas', location, (2017, 5, 1), (2017, 4, 1),
        icds_feature_flag=True)
    self.assertDictEqual(expected, report['kpi'][0][1])
def test_awc_reports_maternal_child_stunting_height_for_age_icds_features_flag(self):
    """Stunting KPI card at [1][0] with the ICDS feature flag enabled."""
    location = {
        'state_id': 'st1',
        'district_id': 'd1',
        'block_id': 'b1',
        'awc_id': 'a1',
        'aggregation_level': 5
    }
    expected = {
        "color": "red",
        "all": 0,
        "frequency": "month",
        "format": "percent_and_div",
        "percent": "Data in the previous reporting period was 0",
        "value": 0,
        "label": "Stunting (Height-for-Age)",
        "help_text": (
            "Of the children whose height was measured, the percentage of children between "
            "0 - 5 years who were moderately/severely stunted in the current month."
            "<br/><br/>"
            "Stunting is a sign of chronic undernutrition and has long lasting harmful consequences "
            "on the growth of a child"
        )
    }
    report = get_awc_reports_maternal_child(
        'icds-cas', location, (2017, 5, 1), (2017, 4, 1),
        icds_feature_flag=True)
    self.assertDictEqual(expected, report['kpi'][1][0])
def test_awc_reports_maternal_child_weighing_efficiency(self):
    """Weighing-efficiency KPI card at [1][1] for AWC a1, May 2017."""
    location = {
        'state_id': 'st1',
        'district_id': 'd1',
        'block_id': 'b1',
        'awc_id': 'a1',
        'aggregation_level': 5
    }
    expected = {
        "color": "green",
        "all": 0,
        "frequency": "month",
        "format": "percent_and_div",
        "percent": "Data in the previous reporting period was 0",
        "value": 0,
        "label": "Weighing Efficiency",
        'help_text': "Of the children between the ages of 0-5 years who are enrolled for Anganwadi "
                     "Services, the percentage who were weighed in the given month. ",
    }
    report = get_awc_reports_maternal_child(
        'icds-cas', location, (2017, 5, 1), (2017, 4, 1))
    self.assertDictEqual(expected, report['kpi'][1][1])
def test_awc_reports_maternal_child_newborns_with_low_birth_weight(self):
    """Low-birth-weight KPI card at [2][0] for AWC a1, May 2017."""
    location = {
        'state_id': 'st1',
        'district_id': 'd1',
        'block_id': 'b1',
        'awc_id': 'a1',
        'aggregation_level': 5
    }
    expected = {
        "color": "red",
        "all": 0,
        "frequency": "month",
        "format": "percent_and_div",
        "percent": "Data in the previous reporting period was 0",
        "value": 0,
        "label": "Newborns with Low Birth Weight",
        'help_text': new_born_with_low_weight_help_text(html=False),
    }
    report = get_awc_reports_maternal_child(
        'icds-cas', location, (2017, 5, 1), (2017, 4, 1))
    self.assertDictEqual(expected, report['kpi'][2][0])
def test_awc_reports_maternal_child_early_initiation_of_breastfeeding(self):
    """Early-initiation-of-breastfeeding KPI card at [2][1] for AWC a1."""
    location = {
        'state_id': 'st1',
        'district_id': 'd1',
        'block_id': 'b1',
        'awc_id': 'a1',
        'aggregation_level': 5
    }
    expected = {
        "color": "green",
        "all": 0,
        "frequency": "month",
        "format": "percent_and_div",
        "percent": "Data in the previous reporting period was 0",
        "value": 0,
        "label": "Early Initiation of Breastfeeding",
        'help_text': early_initiation_breastfeeding_help_text(),
    }
    report = get_awc_reports_maternal_child(
        'icds-cas', location, (2017, 5, 1), (2017, 4, 1))
    self.assertDictEqual(expected, report['kpi'][2][1])
def test_awc_reports_maternal_child_exclusive_breastfeeding(self):
    """Exclusive-breastfeeding KPI card at [3][0] for AWC a1, May 2017."""
    location = {
        'state_id': 'st1',
        'district_id': 'd1',
        'block_id': 'b1',
        'awc_id': 'a1',
        'aggregation_level': 5
    }
    expected = {
        "color": "green",
        "all": 0,
        "frequency": "month",
        "format": "percent_and_div",
        "percent": "Data in the previous reporting period was 0",
        "value": 0,
        "label": "Exclusive breastfeeding",
        'help_text': exclusive_breastfeeding_help_text(),
    }
    report = get_awc_reports_maternal_child(
        'icds-cas', location, (2017, 5, 1), (2017, 4, 1))
    self.assertDictEqual(expected, report['kpi'][3][0])
def test_awc_reports_maternal_child_children_initiated_appropriate_complementary_feeding(self):
    """Complementary-feeding KPI card at [3][1] for AWC a1, May 2017."""
    location = {
        'state_id': 'st1',
        'district_id': 'd1',
        'block_id': 'b1',
        'awc_id': 'a1',
        'aggregation_level': 5
    }
    expected = {
        "color": "green",
        "all": 0,
        "frequency": "month",
        "format": "percent_and_div",
        "percent": "Data in the previous reporting period was 0",
        "value": 0,
        "label": "Children initiated appropriate Complementary Feeding",
        'help_text': children_initiated_appropriate_complementary_feeding_help_text(),
    }
    report = get_awc_reports_maternal_child(
        'icds-cas', location, (2017, 5, 1), (2017, 4, 1))
    self.assertDictEqual(expected, report['kpi'][3][1])
def test_awc_reports_maternal_child_immunization_coverage_at_age_1_year(self):
    """Immunization-coverage KPI card at [4][0] for AWC a1, May 2017."""
    location = {
        'state_id': 'st1',
        'district_id': 'd1',
        'block_id': 'b1',
        'awc_id': 'a1',
        'aggregation_level': 5
    }
    expected = {
        "color": "green",
        "all": 0,
        "frequency": "month",
        "format": "percent_and_div",
        "percent": "Data in the previous reporting period was 0",
        "value": 0,
        "label": "Immunization Coverage (at age 1 year)",
        'help_text': (
            "Of the total number of children enrolled for Anganwadi Services who are over a year old, "
            "the percentage of children who have received the complete immunization as per the National "
            "Immunization Schedule of India that is required by age 1."
            "<br/><br/>"
            " This includes the following immunizations:<br/>"
            " If Pentavalent path: Penta1/2/3, OPV1/2/3, BCG, Measles, VitA1<br/>"
            " If DPT/HepB path: DPT1/2/3, HepB1/2/3, OPV1/2/3, BCG, Measles, VitA1"
        ),
    }
    report = get_awc_reports_maternal_child(
        'icds-cas', location, (2017, 5, 1), (2017, 4, 1))
    self.assertDictEqual(expected, report['kpi'][4][0])
def test_awc_reports_maternal_child_institutional_deliveries(self):
    """Institutional-deliveries KPI card at [4][1] for AWC a1, May 2017."""
    location = {
        'state_id': 'st1',
        'district_id': 'd1',
        'block_id': 'b1',
        'awc_id': 'a1',
        'aggregation_level': 5
    }
    expected = {
        "color": "green",
        "all": 0,
        "frequency": "month",
        "format": "percent_and_div",
        "percent": "Data in the previous reporting period was 0",
        "value": 0,
        "label": "Institutional Deliveries",
        'help_text': institutional_deliveries_help_text(),
    }
    report = get_awc_reports_maternal_child(
        'icds-cas', location, (2017, 5, 1), (2017, 4, 1))
    self.assertDictEqual(expected, report['kpi'][4][1])
def test_awc_reports_maternal_child_kpi_length(self):
    """The maternal/child report should contain exactly 5 KPI rows."""
    data = get_awc_reports_maternal_child(
        'icds-cas',
        {
            'state_id': 'st1',
            'district_id': 'd1',
            'block_id': 'b1',
            'awc_id': 'a1',
            'aggregation_level': 5
        },
        (2017, 5, 1),
        (2017, 4, 1),
    )
    # The previous version deleted 'help_text' from every KPI card first,
    # but that cannot change the number of rows, so the loop was dead code.
    self.assertEqual(
        len(data['kpi']),
        5
    )
def test_awc_reports_maternal_child_kpi_total_length(self):
    """All KPI rows of the maternal/child report should total 10 cards."""
    data = get_awc_reports_maternal_child(
        'icds-cas',
        {
            'state_id': 'st1',
            'district_id': 'd1',
            'block_id': 'b1',
            'awc_id': 'a1',
            'aggregation_level': 5
        },
        (2017, 5, 1),
        (2017, 4, 1),
    )['kpi']
    # Generator expression: no need to build an intermediate list just to sum.
    self.assertEqual(
        sum(len(record_row) for record_row in data),
        10
    )
def test_awc_reports_maternal_child_keys(self):
    """The maternal/child report dict should contain only the 'kpi' key."""
    data = get_awc_reports_maternal_child(
        'icds-cas',
        {
            'state_id': 'st1',
            'district_id': 'd1',
            'block_id': 'b1',
            'awc_id': 'a1',
            'aggregation_level': 5
        },
        (2017, 5, 1),
        (2017, 4, 1),
    )
    # The previous version deleted 'help_text' from every KPI card first, but
    # that cannot change the top-level key set, so the loop was dead code.
    self.assertEqual(
        list(data.keys()),
        ['kpi']
    )
def test_awc_reports_demographics_monthly_registered_households(self):
    """Registered-households KPI card at [0][0] for AWC a1, June 2017."""
    location = {
        'state_id': 'st1',
        'district_id': 'd1',
        'block_id': 'b1',
        'awc_id': 'a1',
        'aggregation_level': 5
    }
    expected = {
        "all": "",
        "format": "number",
        "color": "green",
        "percent": 'Data in the previous reporting period was 0',
        "value": 0,
        "label": "Registered Households",
        "frequency": "month",
        "help_text": "Total number of households registered"
    }
    report = get_awc_report_demographics(
        'icds-cas', location, (2017, 6, 1), (2017, 5, 1))
    self.assertDictEqual(expected, report['kpi'][0][0])
def test_awc_reports_demographics_monthly_percent_aadhaar_seeded_beneficiaries(self):
    """Aadhaar-seeded-beneficiaries KPI card at [0][1] for AWC a1, June 2017."""
    location = {
        'state_id': 'st1',
        'district_id': 'd1',
        'block_id': 'b1',
        'awc_id': 'a1',
        'aggregation_level': 5
    }
    expected = {
        "all": 5,
        'color': 'red',
        "format": "percent_and_div",
        "percent": -39.99999999999999,
        "value": 1,
        "label": AADHAR_SEEDED_BENEFICIARIES,
        "frequency": "month",
        "help_text": percent_aadhaar_seeded_beneficiaries_help_text()
    }
    report = get_awc_report_demographics(
        'icds-cas', location, (2017, 6, 1), (2017, 5, 1))
    self.assertDictEqual(expected, report['kpi'][0][1])
def test_awc_reports_demographics_monthly_percent_children_0_6_years_enrolled_for_anganwadi_services(self):
    """Children-enrolled KPI card at [1][0] for AWC a1, June 2017."""
    location = {
        'state_id': 'st1',
        'district_id': 'd1',
        'block_id': 'b1',
        'awc_id': 'a1',
        'aggregation_level': 5
    }
    expected = {
        "all": 0,
        "format": "percent_and_div",
        "color": "green",
        "percent": "Data in the previous reporting period was 0",
        "value": 0,
        "label": CHILDREN_ENROLLED_FOR_ANGANWADI_SERVICES,
        "frequency": "month",
        "help_text": percent_children_enrolled_help_text()
    }
    report = get_awc_report_demographics(
        'icds-cas', location, (2017, 6, 1), (2017, 5, 1))
    self.assertDictEqual(expected, report['kpi'][1][0])
def test_awc_reports_demographics_monthly_percent_pregnant_women_enrolled_for_anganwadi_services(self):
    """Pregnant-women-enrolled KPI card at [1][1] for AWC a1, June 2017."""
    location = {
        'state_id': 'st1',
        'district_id': 'd1',
        'block_id': 'b1',
        'awc_id': 'a1',
        'aggregation_level': 5
    }
    expected = {
        "all": 2,
        "format": "percent_and_div",
        "color": "red",
        "percent": 0,
        "value": 2,
        "label": PREGNANT_WOMEN_ENROLLED_FOR_ANGANWADI_SERVICES,
        "frequency": "month",
        "help_text": percent_pregnant_women_enrolled_help_text()
    }
    report = get_awc_report_demographics(
        'icds-cas', location, (2017, 6, 1), (2017, 5, 1))
    self.assertDictEqual(expected, report['kpi'][1][1])
def test_awc_reports_demographics_monthly_percent_lactating_women_enrolled_for_anganwadi_services(self):
    """Lactating-women-enrolled KPI card at [2][0] for AWC a1, June 2017."""
    location = {
        'state_id': 'st1',
        'district_id': 'd1',
        'block_id': 'b1',
        'awc_id': 'a1',
        'aggregation_level': 5
    }
    expected = {
        "all": 3,
        "format": "percent_and_div",
        "color": "red",
        "percent": 0,
        "value": 3,
        "label": LACTATING_WOMEN_ENROLLED_FOR_ANGANWADI_SERVICES,
        "frequency": "month",
        "help_text": percent_lactating_women_enrolled_help_text()
    }
    report = get_awc_report_demographics(
        'icds-cas', location, (2017, 6, 1), (2017, 5, 1))
    self.assertDictEqual(expected, report['kpi'][2][0])
def test_awc_reports_demographics_monthly_percent_adolescent_girls_11_14_years_enrolled_for_services(self):
    """Out-of-school adolescent girls KPI card at [2][1] for AWC a1, June 2017."""
    location = {
        'state_id': 'st1',
        'district_id': 'd1',
        'block_id': 'b1',
        'awc_id': 'a1',
        'aggregation_level': 5
    }
    expected = {
        "all": 0,
        "format": "percent_and_div",
        "color": "green",
        "percent": 'Data in the previous reporting period was 0',
        "value": 0,
        "label": OUT_OF_SCHOOL_ADOLESCENT_GIRLS_11_14_YEARS,
        "frequency": "month",
        "help_text": percent_adolescent_girls_enrolled_help_text_v2()
    }
    report = get_awc_report_demographics(
        'icds-cas', location, (2017, 6, 1), (2017, 5, 1))
    self.assertDictEqual(expected, report['kpi'][2][1])
def test_awc_reports_demographics_monthly_kpi_length(self):
    """The monthly demographics report should contain exactly 3 KPI rows."""
    report = get_awc_report_demographics(
        'icds-cas',
        {
            'state_id': 'st1',
            'district_id': 'd1',
            'block_id': 'b1',
            'awc_id': 'a1',
            'aggregation_level': 5
        },
        (2017, 6, 1),
        (2017, 5, 1),
    )
    self.assertEqual(3, len(report['kpi']))
def test_awc_reports_demographics_monthly_kpi_total_length(self):
    """All KPI rows of the monthly demographics report should total 6 cards."""
    data = get_awc_report_demographics(
        'icds-cas',
        {
            'state_id': 'st1',
            'district_id': 'd1',
            'block_id': 'b1',
            'awc_id': 'a1',
            'aggregation_level': 5
        },
        (2017, 6, 1),
        (2017, 5, 1),
    )['kpi']
    # Generator expression: no need to build an intermediate list just to sum.
    self.assertEqual(
        sum(len(record_row) for record_row in data),
        6
    )
def test_awc_reports_demographics_monthly_chart(self):
    """Monthly demographics chart: every age bucket is empty for AWC a1."""
    report = get_awc_report_demographics(
        'icds-cas',
        {
            'state_id': 'st1',
            'district_id': 'd1',
            'block_id': 'b1',
            'awc_id': 'a1',
            'aggregation_level': 5
        },
        (2017, 6, 1),
        (2017, 5, 1),
    )
    expected_chart = [
        {
            "values": [
                ["0-1 month", 0],
                ["1-6 months", 0],
                ["6-12 months", 0],
                ["1-3 years", 0],
                ["3-6 years", 0],
            ],
            "classed": "dashed",
            "key": "Children (0-6 years)",
        }
    ]
    self.assertEqual(expected_chart, report['chart'])
def test_awc_reports_demographics_monthly_keys(self):
    """The monthly demographics report should expose 'kpi' and 'chart' only."""
    report = get_awc_report_demographics(
        'icds-cas',
        {
            'state_id': 'st1',
            'district_id': 'd1',
            'block_id': 'b1',
            'awc_id': 'a1',
            'aggregation_level': 5
        },
        (2017, 6, 1),
        (2017, 5, 1),
    )
    self.assertItemsEqual(report, ['kpi', 'chart'])
def test_awc_reports_demographics_daily_kpi_length(self):
    """The daily demographics report should contain exactly 3 KPI rows."""
    report = get_awc_report_demographics(
        'icds-cas',
        {
            'state_id': 'st1',
            'district_id': 'd1',
            'block_id': 'b1',
            'awc_id': 'a1',
            'aggregation_level': 5
        },
        (2017, 5, 29),
        (2017, 5, 1),
    )
    self.assertEqual(3, len(report['kpi']))
def test_awc_reports_demographics_daily_kpi_total_length(self):
    """All KPI rows of the daily demographics report should total 6 cards."""
    data = get_awc_report_demographics(
        'icds-cas',
        {
            'state_id': 'st1',
            'district_id': 'd1',
            'block_id': 'b1',
            'awc_id': 'a1',
            'aggregation_level': 5
        },
        (2017, 5, 29),
        (2017, 5, 1),
    )['kpi']
    # Generator expression: no need to build an intermediate list just to sum.
    self.assertEqual(
        sum(len(record_row) for record_row in data),
        6
    )
def test_awc_reports_demographics_daily_chart(self):
    """Daily demographics chart: every age bucket is empty for AWC a1."""
    report = get_awc_report_demographics(
        'icds-cas',
        {
            'state_id': 'st1',
            'district_id': 'd1',
            'block_id': 'b1',
            'awc_id': 'a1',
            'aggregation_level': 5
        },
        (2017, 5, 29),
        (2017, 5, 1),
    )
    expected_chart = [
        {
            "values": [
                ["0-1 month", 0],
                ["1-6 months", 0],
                ["6-12 months", 0],
                ["1-3 years", 0],
                ["3-6 years", 0],
            ],
            "classed": "dashed",
            "key": "Children (0-6 years)",
        }
    ]
    self.assertEqual(expected_chart, report['chart'])
def test_awc_reports_demographics_daily_keys(self):
    """The daily demographics report should expose 'kpi' and 'chart' only."""
    report = get_awc_report_demographics(
        'icds-cas',
        {
            'state_id': 'st1',
            'district_id': 'd1',
            'block_id': 'b1',
            'awc_id': 'a1',
            'aggregation_level': 5
        },
        (2017, 5, 29),
        (2017, 5, 1),
    )
    self.assertItemsEqual(report, ['kpi', 'chart'])
def test_awc_reports_demographics_daily_if_aggregation_script_fail_kpi_length(self):
    """Daily report still has 3 KPI rows when the aggregation day is missing."""
    report = get_awc_report_demographics(
        'icds-cas',
        {
            'state_id': 'st1',
            'district_id': 'd1',
            'block_id': 'b1',
            'awc_id': 'a1',
            'aggregation_level': 5
        },
        (2017, 5, 30),
        (2017, 5, 1),
    )
    self.assertEqual(3, len(report['kpi']))
def test_awc_reports_demographics_daily_if_aggregation_script_fail_kpi_total_length(self):
    """Daily report still totals 6 KPI cards when the aggregation day is missing."""
    data = get_awc_report_demographics(
        'icds-cas',
        {
            'state_id': 'st1',
            'district_id': 'd1',
            'block_id': 'b1',
            'awc_id': 'a1',
            'aggregation_level': 5
        },
        (2017, 5, 30),
        (2017, 5, 1),
    )['kpi']
    # Generator expression: no need to build an intermediate list just to sum.
    self.assertEqual(
        sum(len(record_row) for record_row in data),
        6
    )
def test_awc_reports_demographics_daily_if_aggregation_script_fail_chart(self):
    """Daily chart still renders empty buckets when the aggregation day is missing."""
    report = get_awc_report_demographics(
        'icds-cas',
        {
            'state_id': 'st1',
            'district_id': 'd1',
            'block_id': 'b1',
            'awc_id': 'a1',
            'aggregation_level': 5
        },
        (2017, 5, 30),
        (2017, 5, 1),
    )
    expected_chart = [
        {
            "values": [
                ["0-1 month", 0],
                ["1-6 months", 0],
                ["6-12 months", 0],
                ["1-3 years", 0],
                ["3-6 years", 0],
            ],
            "classed": "dashed",
            "key": "Children (0-6 years)",
        }
    ]
    self.assertEqual(expected_chart, report['chart'])
def test_awc_reports_demographics_daily_if_aggregation_script_fail_keys(self):
    """Daily report keys are unchanged when the aggregation day is missing."""
    report = get_awc_report_demographics(
        'icds-cas',
        {
            'state_id': 'st1',
            'district_id': 'd1',
            'block_id': 'b1',
            'awc_id': 'a1',
            'aggregation_level': 5
        },
        (2017, 5, 30),
        (2017, 5, 1),
    )
    self.assertItemsEqual(report, ['kpi', 'chart'])
def _get_beneficiary(self, case_id):
    """Return the single beneficiary row with the given case id from AWC a18."""
    report = get_awc_report_beneficiary(
        0, 100, 1, 'dob', {'awc_id': 'a18'}, (2017, 5, 1), (2017, 3, 1), False)
    matches = [row for row in report['data'] if row['case_id'] == case_id]
    return matches[0]
def test_awc_report_beneficiary_645fd452_3732_44fb_a2d3_46162304807e(self):
    """Beneficiary row for case 645fd452-3732-44fb-a2d3-46162304807e."""
    expected = {
        'recorded_weight': '9.9000000000000000',
        'age_in_months': 17,
        'current_month_stunting': {'color': 'black', 'value': 'Data Not Entered'},
        'pse_days_attended': None,
        'dob': '2015-12-15',
        'age': '1 year 5 months ',
        'current_month_wasting': {'color': 'black', 'value': 'Data Not Entered'},
        'current_month_nutrition_status': {'color': 'black', 'value': 'Normal weight for age'},
        'case_id': '645fd452-3732-44fb-a2d3-46162304807e',
        'recorded_height': 0,
        'fully_immunized': 'No',
        'person_name': 'Name 1237',
        'aww_phone_number': None,
        'mother_phone_number': None,
        'beneficiary_status': 'Active'
    }
    row = self._get_beneficiary('645fd452-3732-44fb-a2d3-46162304807e')
    self.assertJSONEqual(
        json.dumps(row, cls=DjangoJSONEncoder),
        json.dumps(expected, cls=DjangoJSONEncoder),
    )
def test_awc_report_beneficiary_9ca36787_bed9_4af0_a13e_fca1c9cad360(self):
    """Beneficiary row for case 9ca36787-bed9-4af0-a13e-fca1c9cad360."""
    expected = {
        'recorded_weight': '6.2000000000000000',
        'age_in_months': 5,
        'current_month_stunting': {'color': 'black', 'value': 'Data Not Entered'},
        'pse_days_attended': None,
        'dob': '2016-12-16',
        'age': '5 months ',
        'current_month_wasting': {'color': 'black', 'value': 'Data Not Entered'},
        'current_month_nutrition_status': {'color': 'black', 'value': 'Normal weight for age'},
        'case_id': '9ca36787-bed9-4af0-a13e-fca1c9cad360',
        'recorded_height': 0,
        'fully_immunized': 'No',
        'person_name': 'Name 1303',
        'aww_phone_number': None,
        'mother_phone_number': None,
        'beneficiary_status': 'Active'
    }
    row = self._get_beneficiary('9ca36787-bed9-4af0-a13e-fca1c9cad360')
    self.assertJSONEqual(
        json.dumps(row, cls=DjangoJSONEncoder),
        json.dumps(expected, cls=DjangoJSONEncoder),
    )
def test_awc_report_beneficiary_7673a69c_29af_478c_85c6_9c3b22f6b2e4(self):
    """Beneficiary row for case 7673a69c-29af-478c-85c6-9c3b22f6b2e4."""
    expected = {
        'recorded_weight': '11.0000000000000000',
        'age_in_months': 14,
        'current_month_stunting': {'color': 'black', 'value': 'Data Not Entered'},
        'pse_days_attended': None,
        'dob': '2016-03-06',
        'age': '1 year 2 months ',
        'current_month_wasting': {'color': 'black', 'value': 'Data Not Entered'},
        'current_month_nutrition_status': {'color': 'black', 'value': 'Normal weight for age'},
        'case_id': '7673a69c-29af-478c-85c6-9c3b22f6b2e4',
        'recorded_height': 0,
        'fully_immunized': 'No',
        'person_name': 'Name 1305',
        'aww_phone_number': None,
        'mother_phone_number': None,
        'beneficiary_status': 'Active'
    }
    row = self._get_beneficiary('7673a69c-29af-478c-85c6-9c3b22f6b2e4')
    self.assertJSONEqual(
        json.dumps(row, cls=DjangoJSONEncoder),
        json.dumps(expected, cls=DjangoJSONEncoder),
    )
def test_awc_report_beneficiary_d5d3fbeb_8b6a_486b_a853_30be35589200(self):
    """Beneficiary row for case d5d3fbeb-8b6a-486b-a853-30be35589200."""
    expected = {
        'recorded_weight': '7.0000000000000000',
        'age_in_months': 7,
        'current_month_stunting': {'color': 'black', 'value': 'Data Not Entered'},
        'pse_days_attended': None,
        'dob': '2016-10-05',
        'age': '7 months ',
        'current_month_wasting': {'color': 'black', 'value': 'Data Not Entered'},
        'current_month_nutrition_status': {'color': 'black', 'value': 'Normal weight for age'},
        'case_id': 'd5d3fbeb-8b6a-486b-a853-30be35589200',
        'recorded_height': 0,
        'fully_immunized': 'No',
        'person_name': 'Name 1341',
        'aww_phone_number': None,
        'mother_phone_number': None,
        'beneficiary_status': 'Active'
    }
    row = self._get_beneficiary('d5d3fbeb-8b6a-486b-a853-30be35589200')
    self.assertJSONEqual(
        json.dumps(row, cls=DjangoJSONEncoder),
        json.dumps(expected, cls=DjangoJSONEncoder),
    )
def test_awc_report_beneficiary_b954eb28_75de_43c8_9ec0_d38b7d246ead(self):
    """Beneficiary row for case b954eb28-75de-43c8-9ec0-d38b7d246ead."""
    expected = {
        'recorded_weight': '19.0000000000000000',
        'age_in_months': 59,
        'current_month_stunting': {'color': 'black', 'value': 'Data Not Entered'},
        'pse_days_attended': 1,
        'dob': '2012-06-26',
        'age': '4 years 11 months ',
        'current_month_wasting': {'color': 'black', 'value': 'Data Not Entered'},
        'current_month_nutrition_status': {'color': 'black', 'value': 'Normal weight for age'},
        'case_id': 'b954eb28-75de-43c8-9ec0-d38b7d246ead',
        'recorded_height': 0,
        'fully_immunized': 'No',
        'person_name': 'Name 2617',
        'aww_phone_number': None,
        'mother_phone_number': None,
        'beneficiary_status': 'Active'
    }
    row = self._get_beneficiary('b954eb28-75de-43c8-9ec0-d38b7d246ead')
    self.assertJSONEqual(
        json.dumps(row, cls=DjangoJSONEncoder),
        json.dumps(expected, cls=DjangoJSONEncoder),
    )
def test_awc_report_beneficiary_6faecfe6_cc88_4ff0_9b3d_d8ca069dd06f(self):
    """Beneficiary row for case 6faecfe6-cc88-4ff0-9b3d-d8ca069dd06f.

    Renamed: the old method name referenced case 532f3754-e231-40ec-a861-abbb2a06dff5,
    but the test has always fetched and asserted case 6faecfe6-... .
    """
    data = self._get_beneficiary('6faecfe6-cc88-4ff0-9b3d-d8ca069dd06f')
    self.assertJSONEqual(
        json.dumps(data, cls=DjangoJSONEncoder),
        json.dumps(
            {
                'recorded_weight': '4.0000000000000000',
                'age_in_months': 2,
                'current_month_stunting': {'color': 'black', 'value': 'Data Not Entered'},
                'pse_days_attended': None,
                'dob': '2017-03-19',
                'age': '2 months ',
                'current_month_wasting': {'color': 'black', 'value': 'Data Not Entered'},
                'current_month_nutrition_status': {'color': 'black', 'value': 'Normal weight for age'},
                'case_id': '6faecfe6-cc88-4ff0-9b3d-d8ca069dd06f',
                'recorded_height': 0,
                'fully_immunized': 'No',
                'person_name': 'Name 2917',
                'aww_phone_number': None,
                'mother_phone_number': None,
                'beneficiary_status': 'Active'
            },
            cls=DjangoJSONEncoder
        )
    )
def test_awc_report_beneficiary_3b242a3b_693e_44dd_ad4a_b713efdb0fdb(self):
    """Beneficiary row for case 3b242a3b-693e-44dd-ad4a-b713efdb0fdb."""
    expected = {
        'recorded_weight': '14.3000000000000000',
        'age_in_months': 45,
        'current_month_stunting': {'color': 'black', 'value': 'Data Not Entered'},
        'pse_days_attended': 13,
        'dob': '2013-08-22',
        'age': '3 years 9 months ',
        'current_month_wasting': {'color': 'black', 'value': 'Data Not Entered'},
        'current_month_nutrition_status': {'color': 'black', 'value': 'Normal weight for age'},
        'case_id': '3b242a3b-693e-44dd-ad4a-b713efdb0fdb',
        'recorded_height': 0,
        'fully_immunized': 'No',
        'person_name': 'Name 4398',
        'aww_phone_number': None,
        'mother_phone_number': None,
        'beneficiary_status': 'Active'
    }
    row = self._get_beneficiary('3b242a3b-693e-44dd-ad4a-b713efdb0fdb')
    self.assertJSONEqual(
        json.dumps(row, cls=DjangoJSONEncoder),
        json.dumps(expected, cls=DjangoJSONEncoder),
    )
def test_awc_report_beneficiary_4cd07ebf_abce_4345_a930_f6db7ede8996(self):
    """Beneficiary row for case 4cd07ebf-abce-4345-a930-f6db7ede8996."""
    expected = {
        'recorded_weight': '14.5000000000000000',
        'age_in_months': 57,
        'current_month_stunting': {'color': 'black', 'value': 'Data Not Entered'},
        'pse_days_attended': 9,
        'dob': '2012-08-24',
        'age': '4 years 9 months ',
        'current_month_wasting': {'color': 'black', 'value': 'Data Not Entered'},
        'current_month_nutrition_status': {'color': 'black', 'value': 'Normal weight for age'},
        'case_id': '4cd07ebf-abce-4345-a930-f6db7ede8996',
        'recorded_height': 0,
        'fully_immunized': 'No',
        'person_name': 'Name 4399',
        'aww_phone_number': None,
        'mother_phone_number': None,
        'beneficiary_status': 'Active'
    }
    row = self._get_beneficiary('4cd07ebf-abce-4345-a930-f6db7ede8996')
    self.assertJSONEqual(
        json.dumps(row, cls=DjangoJSONEncoder),
        json.dumps(expected, cls=DjangoJSONEncoder),
    )
def test_awc_report_beneficiary_0198ec4a_f5ed_4452_863c_a400f43d238a(self):
    """Beneficiary row for case 0198ec4a-f5ed-4452-863c-a400f43d238a."""
    expected = {
        'recorded_weight': '13.3000000000000000',
        'age_in_months': 49,
        'current_month_stunting': {'color': 'black', 'value': 'Data Not Entered'},
        'pse_days_attended': 11,
        'dob': '2013-05-01',
        'age': '4 years ',
        'current_month_wasting': {'color': 'black', 'value': 'Data Not Entered'},
        'current_month_nutrition_status': {'color': 'black', 'value': 'Normal weight for age'},
        'case_id': '0198ec4a-f5ed-4452-863c-a400f43d238a',
        'recorded_height': 0,
        'fully_immunized': 'No',
        'person_name': 'Name 4400',
        'aww_phone_number': None,
        'mother_phone_number': None,
        'beneficiary_status': 'Active'
    }
    row = self._get_beneficiary('0198ec4a-f5ed-4452-863c-a400f43d238a')
    self.assertJSONEqual(
        json.dumps(row, cls=DjangoJSONEncoder),
        json.dumps(expected, cls=DjangoJSONEncoder),
    )
def test_awc_report_beneficiary_a9dc5cac_6820_45cf_b8c9_16f2cfb0ae02(self):
    """Beneficiary row for case a9dc5cac-6820-45cf-b8c9-16f2cfb0ae02."""
    expected = {
        'recorded_weight': '6.8000000000000000',
        'age_in_months': 6,
        'current_month_stunting': {'color': 'black', 'value': 'Data Not Entered'},
        'pse_days_attended': None,
        'dob': '2016-11-16',
        'age': '6 months ',
        'current_month_wasting': {'color': 'black', 'value': 'Data Not Entered'},
        'current_month_nutrition_status': {'color': 'black', 'value': 'Normal weight for age'},
        'case_id': 'a9dc5cac-6820-45cf-b8c9-16f2cfb0ae02',
        'recorded_height': 0,
        'fully_immunized': 'No',
        'person_name': 'Name 1191',
        'aww_phone_number': None,
        'mother_phone_number': None,
        'beneficiary_status': 'Active'
    }
    row = self._get_beneficiary('a9dc5cac-6820-45cf-b8c9-16f2cfb0ae02')
    self.assertJSONEqual(
        json.dumps(row, cls=DjangoJSONEncoder),
        json.dumps(expected, cls=DjangoJSONEncoder),
    )
def test_awc_report_beneficiary_data_length(self):
    """A page of length 10 should return exactly 10 beneficiary rows."""
    report = get_awc_report_beneficiary(
        0, 10, 1, 'dob', {'awc_id': 'a18'}, (2017, 5, 1), (2017, 3, 1), False)
    self.assertEqual(10, len(report['data']))
def test_awc_report_beneficiary_data_without_data(self):
    """The beneficiary report metadata (everything except 'data') is stable."""
    expected = {
        "draw": 1,
        "last_month": "May 2017",
        "recordsTotal": 32,
        "months": [
            "May 2017",
            "Apr 2017",
            "Mar 2017"
        ],
        "recordsFiltered": 32,
    }
    report = get_awc_report_beneficiary(
        0, 10, 1, 'dob', {'awc_id': 'a18'}, (2017, 5, 1), (2017, 3, 1), False)
    del report['data']
    self.assertJSONEqual(
        json.dumps(report, cls=DjangoJSONEncoder),
        json.dumps(expected, cls=DjangoJSONEncoder),
    )
def test_awc_report_beneficiary_keys(self):
    """The beneficiary report should expose exactly the six expected keys."""
    report = get_awc_report_beneficiary(
        0, 10, 1, 'dob', {'awc_id': 'a18'}, (2017, 5, 1), (2017, 3, 1), False)
    self.assertItemsEqual(
        report,
        ['draw', 'last_month', 'recordsTotal', 'months', 'recordsFiltered', 'data']
    )
def test_awc_report_pregnant_first_record(self):
    """First pregnant-women row for AWC a15 as of 2 May 2017."""
    expected_row = {
        'age': 23,
        'closed': None,
        'beneficiary': 'Yes',
        'anemic': 'Data Not Entered',
        'case_id': '7313c174-6b63-457c-a734-6eed0a2b2ac6',
        'edd': datetime.date(2017, 8, 31),
        'last_date_thr': None,
        'num_anc_complete': None,
        'number_of_thrs_given': 0,
        'opened_on': datetime.date(2017, 5, 12),
        'person_name': None,
        'trimester': 2,
    }
    with mock.patch('custom.icds_reports.reports.awc_reports.date', SecondDayOfMay):
        report = get_awc_report_pregnant(
            start=0, length=10, order='age', reversed_order=False, awc_id='a15')
        self.assertEqual(2, len(report['data']))
        self.assertEqual(expected_row, report['data'][0])
def test_pregnant_details_first_record_first_trimester(self):
    """The first-trimester section is empty for this second-trimester case."""
    with mock.patch('custom.icds_reports.reports.awc_reports.date', SecondDayOfMay):
        details = get_pregnant_details(
            case_id='7313c174-6b63-457c-a734-6eed0a2b2ac6', awc_id='a15')
        self.assertEqual([], details['data'][0])
def test_pregnant_details_first_record_second_trimester(self):
    """The second-trimester section carries the full visit details."""
    expected_visit = {
        'opened_on': datetime.date(2017, 5, 12),
        'tt_taken': 'N',
        'person_name': 'Data Not Entered',
        'anc_weight': 'Data Not Entered',
        'edd': datetime.date(2017, 8, 31),
        'age': 23,
        'tt_date': 'None',
        'anc_hemoglobin': 'Data Not Entered',
        'symptoms': 'None',
        'preg_order': 'Data Not Entered',
        'using_ifa': 'Y',
        'case_id': '7313c174-6b63-457c-a734-6eed0a2b2ac6',
        'bp': 'Data Not Entered',
        'ifa_consumed_last_seven_days': 'Y',
        'mobile_number': 'Data Not Entered',
        'trimester': 2,
        'counseling': 'Eating Extra, Taking Rest',
        'anc_abnormalities': 'None',
        'anemic': 'Data Not Entered',
        'home_visit_date': datetime.date(2017, 5, 4),
    }
    with mock.patch('custom.icds_reports.reports.awc_reports.date', SecondDayOfMay):
        details = get_pregnant_details(
            case_id='7313c174-6b63-457c-a734-6eed0a2b2ac6', awc_id='a15')
        self.assertEqual([expected_visit], details['data'][1])
def test_pregnant_details_first_record_third_trimester(self):
    """The third-trimester section is empty for this second-trimester case."""
    with mock.patch('custom.icds_reports.reports.awc_reports.date', SecondDayOfMay):
        details = get_pregnant_details(
            case_id='7313c174-6b63-457c-a734-6eed0a2b2ac6', awc_id='a15')
        self.assertEqual([], details['data'][2])
def test_awc_report_lactating_first_record(self):
    """First lactating-women row for AWC a50 as of 2 May 2017."""
    expected_row = {
        'num_rations_distributed': 0,
        'institutional_delivery': 'No',
        'person_name': None,
        'delivery_nature': 'Data Not Entered',
        'age': 20,
        'num_pnc_visits': None,
        'add': datetime.date(2017, 3, 1),
        'case_id': '36d5e223-a631-4030-910c-262a1d066fb3',
        'breastfed_at_birth': 'No',
        'is_ebf': 'No',
    }
    with mock.patch('custom.icds_reports.reports.awc_reports.date', SecondDayOfMay):
        report = get_awc_report_lactating(
            start=0, length=10, order='age', reversed_order=False, awc_id='a50')
        self.assertEqual(expected_row, report['data'][0])
def test_awc_report_lactating_second_record(self):
    """Second lactating-women row for AWC a50 as of 2 May 2017."""
    expected_row = {
        'num_rations_distributed': 6,
        'institutional_delivery': 'No',
        'person_name': None,
        'delivery_nature': 'Data Not Entered',
        'age': 23,
        'num_pnc_visits': None,
        'add': datetime.date(2017, 4, 20),
        'case_id': 'aefb8fe5-1cd1-4235-9baf-963b1a0b498e',
        'breastfed_at_birth': 'No',
        'is_ebf': 'No',
    }
    with mock.patch('custom.icds_reports.reports.awc_reports.date', SecondDayOfMay):
        report = get_awc_report_lactating(
            start=0, length=10, order='age', reversed_order=False, awc_id='a50')
        self.assertEqual(expected_row, report['data'][1])
def test_awc_report_lactating_third_record(self):
    """Third lactating-women row for AWC a50 as of 2 May 2017."""
    expected_row = {
        'num_rations_distributed': 6,
        'institutional_delivery': 'No',
        'person_name': None,
        'delivery_nature': 'Data Not Entered',
        'age': 24,
        'num_pnc_visits': None,
        'add': datetime.date(2017, 3, 1),
        'case_id': '4f0aac21-5b5d-43a6-a1f6-9744d0e66cf2',
        'breastfed_at_birth': 'No',
        'is_ebf': 'No',
    }
    with mock.patch('custom.icds_reports.reports.awc_reports.date', SecondDayOfMay):
        report = get_awc_report_lactating(
            start=0, length=10, order='age', reversed_order=False, awc_id='a50')
        self.assertEqual(expected_row, report['data'][2])
def test_awc_report_lactating_fourth_record(self):
    """Fourth lactating-women row for AWC a50 as of 2 May 2017.

    Renamed from ``..._forth_record`` — "forth" was a typo for "fourth".
    """
    with mock.patch('custom.icds_reports.reports.awc_reports.date', SecondDayOfMay):
        data = get_awc_report_lactating(
            start=0,
            length=10,
            order='age',
            reversed_order=False,
            awc_id='a50'
        )
        self.assertEqual(
            data['data'][3],
            {
                'num_rations_distributed': 12,
                'institutional_delivery': 'No',
                'person_name': None,
                'delivery_nature': 'Data Not Entered',
                'age': 26,
                'num_pnc_visits': None,
                'add': datetime.date(2017, 3, 20),
                'case_id': '10a53900-f65e-46b7-ae0c-f32a208c0677',
                'breastfed_at_birth': 'No',
                'is_ebf': 'No'}
        )
def test_awc_report_lactating_fifth_record(self):
    """Fifth lactating-women row for AWC a50 as of 2 May 2017."""
    expected_row = {
        'num_rations_distributed': 12,
        'institutional_delivery': 'No',
        'person_name': None,
        'delivery_nature': 'Data Not Entered',
        'age': 26,
        'num_pnc_visits': None,
        'add': datetime.date(2017, 3, 1),
        'case_id': '1a6851bc-8172-48fc-80d1-b198f23033ab',
        'breastfed_at_birth': 'No',
        'is_ebf': 'No',
    }
    with mock.patch('custom.icds_reports.reports.awc_reports.date', SecondDayOfMay):
        report = get_awc_report_lactating(
            start=0, length=10, order='age', reversed_order=False, awc_id='a50')
        self.assertEqual(expected_row, report['data'][4])
def test_awc_report_lactating_sixth_record(self):
    """Sixth lactating-women row for AWC a50 as of 2 May 2017."""
    expected_row = {
        'num_rations_distributed': 6,
        'institutional_delivery': 'No',
        'person_name': None,
        'delivery_nature': 'Data Not Entered',
        'age': 26,
        'num_pnc_visits': None,
        'add': datetime.date(2017, 3, 1),
        'case_id': '37c4d26f-eda0-4d9a-bae9-11a17a3ccfaa',
        'breastfed_at_birth': 'No',
        'is_ebf': 'No',
    }
    with mock.patch('custom.icds_reports.reports.awc_reports.date', SecondDayOfMay):
        report = get_awc_report_lactating(
            start=0, length=10, order='age', reversed_order=False, awc_id='a50')
        self.assertEqual(expected_row, report['data'][5])
def test_awc_report_lactating_seventh_record(self):
    """Seventh lactating-women row for AWC a50 as of 2 May 2017."""
    expected_row = {
        'num_rations_distributed': 6,
        'institutional_delivery': 'No',
        'person_name': None,
        'delivery_nature': 'Data Not Entered',
        'age': 29,
        'num_pnc_visits': None,
        'add': datetime.date(2017, 3, 1),
        'case_id': '1744a035-56f1-4059-86f5-93fcea3c6076',
        'breastfed_at_birth': 'No',
        'is_ebf': 'No',
    }
    with mock.patch('custom.icds_reports.reports.awc_reports.date', SecondDayOfMay):
        report = get_awc_report_lactating(
            start=0, length=10, order='age', reversed_order=False, awc_id='a50')
        self.assertEqual(expected_row, report['data'][6])
def test_awc_report_lactating_on_first_of_month(self):
with mock.patch('custom.icds_reports.reports.awc_reports.date', FirstDayOfMay):
data = get_awc_report_lactating(
start=0,
length=7,
order='age',
reversed_order=False,
awc_id='a50'
)
self.assertListEqual(
data['data'],
[
{'num_rations_distributed': 0, 'person_name': None, 'num_pnc_visits': None,
'age': 20, 'delivery_nature': u'Data Not Entered', 'add': datetime.date(2017, 3, 1),
'case_id': u'36d5e223-a631-4030-910c-262a1d066fb3', 'breastfed_at_birth': u'No',
'is_ebf': u'No', 'institutional_delivery': u'No'},
{'num_rations_distributed': 0, 'person_name': None, 'num_pnc_visits': None,
'age': 23, 'delivery_nature': u'Data Not Entered', 'add': datetime.date(2017, 4, 20),
'case_id': u'aefb8fe5-1cd1-4235-9baf-963b1a0b498e', 'breastfed_at_birth': u'No',
'is_ebf': u'No', 'institutional_delivery': u'No'},
{'num_rations_distributed': 0, 'person_name': None, 'num_pnc_visits': None, 'age': 24,
'delivery_nature': u'Data Not Entered', 'add': datetime.date(2017, 3, 1),
'case_id': u'4f0aac21-5b5d-43a6-a1f6-9744d0e66cf2', 'breastfed_at_birth': u'No',
'is_ebf': u'No', 'institutional_delivery': u'No'},
{'num_rations_distributed': 0, 'person_name': None, 'num_pnc_visits': None, 'age': 26,
'delivery_nature': u'Data Not Entered', 'add': datetime.date(2017, 3, 20),
'case_id': u'10a53900-f65e-46b7-ae0c-f32a208c0677', 'breastfed_at_birth': u'No',
'is_ebf': u'No', 'institutional_delivery': u'No'},
{'num_rations_distributed': 0, 'person_name': None, 'num_pnc_visits': None, 'age': 26,
'delivery_nature': u'Data Not Entered', 'add': datetime.date(2017, 3, 1),
'case_id': u'1a6851bc-8172-48fc-80d1-b198f23033ab', 'breastfed_at_birth': u'No',
'is_ebf': u'No', 'institutional_delivery': u'No'},
{'num_rations_distributed': 0, 'person_name': None, 'num_pnc_visits': None, 'age': 26,
'delivery_nature': u'Data Not Entered', 'add': datetime.date(2017, 3, 1),
'case_id': u'37c4d26f-eda0-4d9a-bae9-11a17a3ccfaa', 'breastfed_at_birth': u'No',
'is_ebf': u'No', 'institutional_delivery': u'No'},
{'num_rations_distributed': 0, 'person_name': None, 'num_pnc_visits': None, 'age': 29,
'delivery_nature': u'Data Not Entered', 'add': datetime.date(2017, 3, 1),
'case_id': u'1744a035-56f1-4059-86f5-93fcea3c6076', 'breastfed_at_birth': u'No',
'is_ebf': u'No', 'institutional_delivery': u'No'}
]
)
| 36.2892
| 113
| 0.4053
| 7,603
| 90,723
| 4.582402
| 0.09391
| 0.031573
| 0.022388
| 0.027325
| 0.835247
| 0.792796
| 0.750603
| 0.706946
| 0.667049
| 0.633123
| 0
| 0.107102
| 0.481917
| 90,723
| 2,499
| 114
| 36.303721
| 0.633845
| 0
| 0
| 0.552818
| 0
| 0.000835
| 0.237746
| 0.049293
| 0
| 0
| 0
| 0
| 0.043006
| 1
| 0.037578
| false
| 0
| 0.003758
| 0.00167
| 0.044676
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ace5c421507720d430bab2cfad4fabb8a910178e
| 101
|
py
|
Python
|
api/tacticalrmm/core/admin.py
|
jeffreyvh/tacticalrmm
|
dcfb1732954c2c165e82e6b24686e27f9f909eb3
|
[
"MIT"
] | 1
|
2021-01-19T20:39:02.000Z
|
2021-01-19T20:39:02.000Z
|
api/tacticalrmm/core/admin.py
|
jeffreyvh/tacticalrmm
|
dcfb1732954c2c165e82e6b24686e27f9f909eb3
|
[
"MIT"
] | 5
|
2021-04-08T19:44:31.000Z
|
2021-09-22T19:34:33.000Z
|
api/tacticalrmm/core/admin.py
|
jeffreyvh/tacticalrmm
|
dcfb1732954c2c165e82e6b24686e27f9f909eb3
|
[
"MIT"
] | null | null | null |
# Django admin registration for the core app.
from django.contrib import admin
from .models import CoreSettings
# Expose CoreSettings in the admin with the default ModelAdmin (no customization).
admin.site.register(CoreSettings)
| 20.2
| 33
| 0.841584
| 13
| 101
| 6.538462
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09901
| 101
| 4
| 34
| 25.25
| 0.934066
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ace7dd7dde0f69a5e06447c7c97702f6650deefa
| 169
|
py
|
Python
|
utils/rules/base.py
|
18645956947/TripleIE
|
326e0844ed2cd167a084658bd89703ed94a6e484
|
[
"MIT"
] | null | null | null |
utils/rules/base.py
|
18645956947/TripleIE
|
326e0844ed2cd167a084658bd89703ed94a6e484
|
[
"MIT"
] | 1
|
2019-04-02T06:51:07.000Z
|
2019-04-02T11:14:38.000Z
|
utils/rules/base.py
|
18645956947/TripleIE
|
326e0844ed2cd167a084658bd89703ed94a6e484
|
[
"MIT"
] | 1
|
2019-04-02T02:11:08.000Z
|
2019-04-02T02:11:08.000Z
|
import abc
class Base(abc.ABC):
    """Abstract base class for extraction rules that operate on a sentence.

    Inheriting from ``abc.ABC`` is required for ``@abc.abstractmethod`` to be
    enforced; without it the decorator has no effect and incomplete subclasses
    (or ``Base`` itself) could be instantiated silently.
    """

    def __init__(self, sentence):
        # The raw sentence this rule will be applied to.
        self.sentence = sentence

    # Apply the rule. (Translated from the original comment: "get rules".)
    @abc.abstractmethod
    def get_result(self):
        """Return the extraction result for ``self.sentence``."""
        pass
| 14.083333
| 33
| 0.609467
| 19
| 169
| 5.157895
| 0.684211
| 0.244898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.301775
| 169
| 11
| 34
| 15.363636
| 0.830508
| 0.023669
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0.142857
| 0.142857
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
aceec761eb86742128588c1bdf0298cdafcce988
| 192
|
py
|
Python
|
Aula 22 – Módulos e Pacotes/uteis/numeros/__init__.py
|
Guilherme-Artigas/Python-avancado
|
287e23ac3df181ff84bf5fae8ab925a4433dceb0
|
[
"MIT"
] | null | null | null |
Aula 22 – Módulos e Pacotes/uteis/numeros/__init__.py
|
Guilherme-Artigas/Python-avancado
|
287e23ac3df181ff84bf5fae8ab925a4433dceb0
|
[
"MIT"
] | null | null | null |
Aula 22 – Módulos e Pacotes/uteis/numeros/__init__.py
|
Guilherme-Artigas/Python-avancado
|
287e23ac3df181ff84bf5fae8ab925a4433dceb0
|
[
"MIT"
] | null | null | null |
def fatorial(p1):
    """Return the factorial of ``p1`` by repeated multiplication.

    Counts down from ``p1`` to 1; any value below 1 yields 1.
    """
    result = 1
    counter = p1
    while counter >= 1:
        result *= counter
        counter -= 1
    return result
def dobro(p1):
    """Return double the given value."""
    doubled = p1 * 2
    return doubled
def triplo(p1):
    """Return triple the given value."""
    tripled = p1 * 3
    return tripled
| 11.294118
| 22
| 0.494792
| 28
| 192
| 3.392857
| 0.428571
| 0.147368
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095652
| 0.401042
| 192
| 16
| 23
| 12
| 0.730435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| false
| 0
| 0
| 0.181818
| 0.545455
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
4a0bde8f3edbbafaf3d987a0d6e3e19e1cd29b3d
| 6,060
|
py
|
Python
|
orion/evaluation/point.py
|
PSFC-HEDP/Orion
|
37535c788112df346bb9d3a13255f58f2479d4bc
|
[
"MIT"
] | 543
|
2020-06-16T21:48:43.000Z
|
2021-10-04T01:56:27.000Z
|
orion/evaluation/point.py
|
PSFC-HEDP/Orion
|
37535c788112df346bb9d3a13255f58f2479d4bc
|
[
"MIT"
] | 147
|
2020-05-20T02:22:26.000Z
|
2021-10-12T05:28:56.000Z
|
orion/evaluation/point.py
|
PSFC-HEDP/Orion
|
37535c788112df346bb9d3a13255f58f2479d4bc
|
[
"MIT"
] | 98
|
2020-08-13T11:29:51.000Z
|
2021-10-04T18:59:09.000Z
|
from orion.evaluation.common import _accuracy, _f1_score, _precision, _recall, _weighted_segment
def _point_partition(expected, observed, start=None, end=None):
expected = set(expected)
observed = set(observed)
edge_start = min(expected.union(observed))
if start is not None:
edge_start = start
edge_end = max(expected.union(observed))
if end is not None:
edge_end = end
length = int(edge_end) - int(edge_start) + 1
expected_parts = [0] * length
observed_parts = [0] * length
for edge in expected:
expected_parts[edge - edge_start] = 1
for edge in observed:
observed_parts[edge - edge_start] = 1
return expected_parts, observed_parts, None
def point_confusion_matrix(expected, observed, data=None, start=None, end=None):
    """Compute the confusion matrix between the ground truth and the detected anomalies.

    Args:
        expected (DataFrame or list of timestamps):
            Ground truth, either a ``pandas.DataFrame`` or a list containing
            one column: timestamp.
        observed (DataFrame or list of timestamps):
            Detected anomalies, either a ``pandas.DataFrame`` or a list
            containing one column: timestamp.
        data (DataFrame):
            Original data as a ``pandas.DataFrame`` with a timestamp column;
            when given, its min/max timestamps override ``start``/``end``.
        start (int):
            Minimum timestamp of the original data.
        end (int):
            Maximum timestamp of the original data.

    Returns:
        tuple:
            number of true negative, false positive, false negative, true positive.
    """
    if data is not None:
        start = data['timestamp'].min()
        end = data['timestamp'].max()

    # Normalize DataFrame inputs to plain timestamp lists.
    if not isinstance(expected, list):
        expected = list(expected['timestamp'])
    if not isinstance(observed, list):
        observed = list(observed['timestamp'])

    return _weighted_segment(expected, observed, _point_partition, start, end)
def point_accuracy(expected, observed, data=None, start=None, end=None):
    """Compute the accuracy score between the ground truth and the detected anomalies.

    Delegates to ``_accuracy`` using the point-wise confusion matrix.

    Args:
        expected (DataFrame or list of timestamps):
            Ground truth, either a ``pandas.DataFrame`` or a list containing
            one column: timestamp.
        observed (DataFrame or list of timestamps):
            Detected anomalies, either a ``pandas.DataFrame`` or a list
            containing one column: timestamp.
        data (DataFrame):
            Original data as a ``pandas.DataFrame`` with a timestamp column,
            used to extract start and end.
        start (int):
            Minimum timestamp of the original data.
        end (int):
            Maximum timestamp of the original data.

    Returns:
        float:
            Accuracy score between the ground truth and detected anomalies.
    """
    return _accuracy(
        expected, observed, data, start, end, cm=point_confusion_matrix)
def point_precision(expected, observed, data=None, start=None, end=None):
    """Compute the precision score between the ground truth and the detected anomalies.

    Delegates to ``_precision`` using the point-wise confusion matrix.

    Args:
        expected (DataFrame or list of timestamps):
            Ground truth, either a ``pandas.DataFrame`` or a list containing
            one column: timestamp.
        observed (DataFrame or list of timestamps):
            Detected anomalies, either a ``pandas.DataFrame`` or a list
            containing one column: timestamp.
        data (DataFrame):
            Original data as a ``pandas.DataFrame`` with a timestamp column,
            used to extract start and end.
        start (int):
            Minimum timestamp of the original data.
        end (int):
            Maximum timestamp of the original data.

    Returns:
        float:
            Precision score between the ground truth and detected anomalies.
    """
    return _precision(
        expected, observed, data, start, end, cm=point_confusion_matrix)
def point_recall(expected, observed, data=None, start=None, end=None):
    """Compute the recall score between the ground truth and the detected anomalies.

    Delegates to ``_recall`` using the point-wise confusion matrix.

    Args:
        expected (DataFrame or list of timestamps):
            Ground truth, either a ``pandas.DataFrame`` or a list containing
            one column: timestamp.
        observed (DataFrame or list of timestamps):
            Detected anomalies, either a ``pandas.DataFrame`` or a list
            containing one column: timestamp.
        data (DataFrame):
            Original data as a ``pandas.DataFrame`` with a timestamp column,
            used to extract start and end.
        start (int):
            Minimum timestamp of the original data.
        end (int):
            Maximum timestamp of the original data.

    Returns:
        float:
            Recall score between the ground truth and detected anomalies.
    """
    return _recall(
        expected, observed, data, start, end, cm=point_confusion_matrix)
def point_f1_score(expected, observed, data=None, start=None, end=None):
    """Compute the F1 score between the ground truth and the detected anomalies.

    Delegates to ``_f1_score`` using the point-wise confusion matrix.

    Args:
        expected (DataFrame or list of timestamps):
            Ground truth, either a ``pandas.DataFrame`` or a list containing
            one column: timestamp.
        observed (DataFrame or list of timestamps):
            Detected anomalies, either a ``pandas.DataFrame`` or a list
            containing one column: timestamp.
        data (DataFrame):
            Original data as a ``pandas.DataFrame`` with a timestamp column,
            used to extract start and end.
        start (int):
            Minimum timestamp of the original data.
        end (int):
            Maximum timestamp of the original data.

    Returns:
        float:
            F1 score between the ground truth and detected anomalies.
    """
    return _f1_score(
        expected, observed, data, start, end, cm=point_confusion_matrix)
| 36.506024
| 96
| 0.645545
| 727
| 6,060
| 5.313618
| 0.114168
| 0.056951
| 0.07766
| 0.058245
| 0.782294
| 0.768833
| 0.768833
| 0.755889
| 0.755889
| 0.676935
| 0
| 0.00229
| 0.279538
| 6,060
| 165
| 97
| 36.727273
| 0.882501
| 0.623102
| 0
| 0
| 0
| 0
| 0.019017
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.189189
| false
| 0
| 0.027027
| 0.027027
| 0.405405
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c583531a4d976f5a816d7694d02c2d87eca4796e
| 158
|
py
|
Python
|
src/scan_type/__init__.py
|
corentinmusard/scapy_port_scanner
|
8c41c1c1f6bb1899222c49548d49eb9e01c41516
|
[
"MIT"
] | 4
|
2017-10-31T17:39:51.000Z
|
2018-08-21T18:37:43.000Z
|
src/scan_type/__init__.py
|
corentinmusard/scapy_port_scanner
|
8c41c1c1f6bb1899222c49548d49eb9e01c41516
|
[
"MIT"
] | 2
|
2021-04-20T19:38:54.000Z
|
2021-06-02T01:11:44.000Z
|
src/scan_type/__init__.py
|
corentinmusard/scapy_port_scanner
|
8c41c1c1f6bb1899222c49548d49eb9e01c41516
|
[
"MIT"
] | 1
|
2018-07-21T21:58:33.000Z
|
2018-07-21T21:58:33.000Z
|
from .connect_scan import ConnectScan
from .syn_scan import SynScan
from .ack_scan import AckScan
from .fin_scan import FinScan
from .udp_scan import UdpScan
| 26.333333
| 37
| 0.841772
| 25
| 158
| 5.12
| 0.52
| 0.390625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126582
| 158
| 5
| 38
| 31.6
| 0.927536
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c5fc611643e063be4400550845a7bea8a44a76d2
| 66
|
py
|
Python
|
crowdsorting/app_resources/sorting_algorithms/PairallStructure/Node.py
|
matthew-cheney/crowd-sorting-single-threaded
|
f32f05641821f5770dd95787c459888101b93d05
|
[
"MIT"
] | 1
|
2019-11-30T07:59:25.000Z
|
2019-11-30T07:59:25.000Z
|
crowdsorting/app_resources/sorting_algorithms/PairallStructure/Node.py
|
mchen95/crowd-sorting
|
f32f05641821f5770dd95787c459888101b93d05
|
[
"MIT"
] | 2
|
2019-10-14T17:16:46.000Z
|
2019-10-21T23:14:32.000Z
|
crowdsorting/app_resources/sorting_algorithms/PairallStructure/Node.py
|
matthew-cheney/crowd-sorting-single-threaded
|
f32f05641821f5770dd95787c459888101b93d05
|
[
"MIT"
] | null | null | null |
class Node:
    """Lightweight holder for a single document used by the sorting structure."""

    def __init__(self, doc):
        # The wrapped document; the sorting algorithm compares nodes by it.
        self.doc = doc

    def __repr__(self):
        # Added for debuggability: makes nodes readable in container dumps.
        return "{}(doc={!r})".format(type(self).__name__, self.doc)
| 11
| 28
| 0.560606
| 9
| 66
| 3.666667
| 0.666667
| 0.424242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 66
| 5
| 29
| 13.2
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
680b4dc17363984c3869a48d34fa64de816c06e6
| 364
|
py
|
Python
|
test/collectd/__init__.py
|
SumoLogic/sumologic-collectd-plugin
|
a387fa9f8116fc6b56fbcd9628e074e4b602b606
|
[
"Apache-2.0"
] | 10
|
2017-08-08T20:28:38.000Z
|
2022-02-09T21:46:10.000Z
|
test/collectd/__init__.py
|
SumoLogic/sumologic-collectd-plugin
|
a387fa9f8116fc6b56fbcd9628e074e4b602b606
|
[
"Apache-2.0"
] | 26
|
2017-08-08T20:36:56.000Z
|
2022-01-12T15:33:30.000Z
|
test/collectd/__init__.py
|
SumoLogic/sumologic-collectd-plugin
|
a387fa9f8116fc6b56fbcd9628e074e4b602b606
|
[
"Apache-2.0"
] | 7
|
2018-04-16T15:29:37.000Z
|
2021-09-05T12:02:11.000Z
|
# Due to circular import problems in sumologic_collectd_metrics/__init__.py, this
# import needs to happen before the Helper
from .register import register_config # isort: skip
from .collectd_mock import CollecdMock
from .helper import Helper
from .logger import debug, error, info, warning
from .register import register_init, register_shutdown, register_write
| 40.444444
| 81
| 0.824176
| 51
| 364
| 5.666667
| 0.607843
| 0.069204
| 0.124567
| 0.179931
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129121
| 364
| 8
| 82
| 45.5
| 0.911672
| 0.362637
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a861b39f5df9122d4e7a06da66c5c4480d12cbaa
| 24,493
|
py
|
Python
|
librad.py
|
nbayer2/UV-Measurements_Thesis
|
7705a8cb50ba20b8cb5c4522fd88bd1806818166
|
[
"CC0-1.0"
] | null | null | null |
librad.py
|
nbayer2/UV-Measurements_Thesis
|
7705a8cb50ba20b8cb5c4522fd88bd1806818166
|
[
"CC0-1.0"
] | null | null | null |
librad.py
|
nbayer2/UV-Measurements_Thesis
|
7705a8cb50ba20b8cb5c4522fd88bd1806818166
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 10:43:23 2020
@author: nbayer
"""
import load_cams_data as ld
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import argparse
import netCDF4 as nc4
from netCDF4 import date2num,num2date
from datetime import datetime
import xarray as xr
"""
#Process for all latitudes and longitudes
python librad.py -d 2015-06-30 -wvls 280 310 340 370 400 469 500
#Process for a specific latitude and longitude
python librad.py -d 2015-06-30 -wvls 280 310 340 370 400 469 500 -lat 51.25 -lon 12.9
"""
"""for calling the function from the terminal"""
parser = argparse.ArgumentParser(description='Process to create and running input_files for LibRadTran from the CAMS_ra for a specific date')
parser.add_argument('-d', type=str, required=True, dest='date', # the variable is saved in args.date as a string
help='Insert the date as 2019-01-07(YYYY-MM-DD)')
parser.add_argument('-wvls', nargs='+',type=float, required=True, dest='wvls', # the variable is saved in args.wvls as a string
help='Insert the wavelengths for the calculations as wvls1 wvls2 ... wvlsn')
parser.add_argument('-lat', type=float, dest='lat',
help="for using a specific latitude")
parser.add_argument('-lon', type=float, dest='lon',
help="for using a specific longitude")
args = parser.parse_args()
"""Define path+file where the CAMS.nc are located"""
fsfc = "/vols/satellite/home/jonas/documents/paper/2020_clearsky_aerosoleffect/scripts/ecmwf/data/nc/cams-ra_"+args.date+"_sfc.nc"
fml = "/vols/satellite/home/jonas/documents/paper/2020_clearsky_aerosoleffect/scripts/ecmwf/data/nc/cams-ra_"+args.date+"_ml.nc"
"""load the CAMS data from the chosen netCDF"""
c = ld.CAMS(fsfc,fml)
"""load atmospheric and gases Constants from load_cams_data.py"""
SI=ld.SI
constants=ld.CONSTANTS
#t_step=14*14 # every time step starts after t_step (grid points)
t_step=41*41
"""Choose a Latitude to be used for the input_file for LibRadtran"""
if args.lat:
lat=51.5
lats=c.lats.reshape((8,14,14)) # print(lats[0,:,0]) to see all lats
lat_grid=[]
lat_grid.extend(abs(lats[0,:,0]-args.lat))
x=lat_grid.index(min(lat_grid)) # x is the position of the chosen lat
"""Choose a Longitude to be used for the input_file for LibRadtran"""
if args.lon:
lon=12.93
lons=c.lons.reshape((8,14,14)) # print(lons[0,0,:]) to see all lons
lon_grid=[]
lon_grid.extend(abs(lons[0,0,:]-args.lon))
y=lon_grid.index(min(lon_grid)) # x is the position of the chosen lon
"""Choose the Wavelengths """
wvls=args.wvls
times=[] #contains all the time steps ##times[1].strftime("%m/%d/%y")
for ti in range(0, len(c.times),t_step):
times.append(pd.to_datetime(c.times[ti]))
"""Define the path to save the atmospheric_file"""
atmo_path='/vols/satellite/home/bayer/libradtran/Libradtran-files/atmospheric_files/'
"""load the aerosol data for the chosen wavelengths"""
AP_sfc,AP_ml = c.aerosol_optprop(wvls)
UVI=pd.DataFrame({'Date':[],'UVI':[]})
for t in range(3,5):
if args.lat and args.lon:
"""crating DataFrame for the atmospheric_file and gases_files"""
data=pd.DataFrame({'z(km)':[],'p(mb)':[],'T(K)':[],'air(# * cm-3)':[]})
ozone=pd.DataFrame({'z(km)':[],'O3 (mass mixing ratio) [kg kg^-1]':[]})
no2=pd.DataFrame({'z(km)':[],'NO2 (mass mixing ratio) [kg kg^-1]':[]})
h2o=pd.DataFrame({'z(km)':[],'H2O (mass mixing ratio) [kg kg^-1]':[]})
for z in range(0,60):
data=data.append({'z(km)':('{0:.3f}'.format(c.z_mlvl.reshape((8,14,14,60))[t,x,y,z]/1000)),
'p(mb)':('{0:.5f}'.format(c.P_mlvl.reshape((8,14,14,60))[t,x,y,z]/100)),
'T(K)':('{0:.3f}'.format(c.cams_ml.t[t,z,x,y].values)),
'air(# * cm-3)':('{0:.7}'.format((c.P_mlvl.reshape((8,14,14,60))[t,x,y,z]/1.e+6/c.cams_ml.t[t,z,x,y].values/SI.k)))
},ignore_index=True)
ozone=ozone.append({'z(km)':('{0:.3f}'.format(c.z_mlvl.reshape((8,14,14,60))[t,x,y,z]/1000)),
'O3 (mass mixing ratio) [kg kg^-1]':'{0:.7}'.format(c.cams_ml.go3[t,z,x,y].values)
},ignore_index=True)
no2=no2.append({'z(km)':('{0:.3f}'.format(c.z_mlvl.reshape((8,14,14,60))[t,x,y,z]/1000)),
'NO2 (mass mixing ratio) [kg kg^-1]':'{0:.7}'.format(c.cams_ml.no2[t,z,x,y].values)
},ignore_index=True)
h2o=h2o.append({'z(km)':('{0:.3f}'.format(c.z_mlvl.reshape((8,14,14,60))[t,x,y,z]/1000)),
'H2O (mass mixing ratio) [kg kg^-1]':'{0:.7}'.format(c.cams_ml.q[t,z,x,y].values/(1-c.cams_ml.q[t,z,x,y].values))
},ignore_index=True)
""" create path for saving the variables files"""
atmo_file=atmo_path+times[t].strftime('%Y%m%d:%H')+'lat_lon:'+str(x)+'_'+str(y)+'.dat'
mol_file_o3=atmo_path+times[t].strftime('%Y%m%d:%H')+'lat_lon:'+str(x)+'_'+str(y)+'-ozone.dat'
mol_file_no2=atmo_path+times[t].strftime('%Y%m%d:%H')+'lat_lon:'+str(x)+'_'+str(y)+'-no2.dat'
mol_file_h2o=atmo_path+times[t].strftime('%Y%m%d:%H')+'lat_lon:'+str(x)+'_'+str(y)+'-h2o.dat'
"""save the atmospheric data file as .dat"""
data.to_csv(atmo_file, columns=['z(km)','p(mb)','T(K)','air(# * cm-3)'],
sep=' ', encoding='utf-8', header=False,index=False)
ozone.to_csv(mol_file_o3, columns=['z(km)','O3 (mass mixing ratio) [kg kg^-1]'],
sep=' ', encoding='utf-8', header=False,index=False)
no2.to_csv(mol_file_no2, columns=['z(km)','NO2 (mass mixing ratio) [kg kg^-1]'],
sep=' ', encoding='utf-8', header=False,index=False)
h2o.to_csv(mol_file_h2o, columns=['z(km)','H2O (mass mixing ratio) [kg kg^-1]'],
sep=' ', encoding='utf-8', header=False,index=False)
"""crating DataFrame for the aerosol_file"""
aerosol_file=pd.DataFrame({'z(km)':[],'aer_layer':[]})
for z in range(0,60,2):
layer_data=pd.DataFrame({'wavelength':[],'extintion coeffient [km-1]':[],'single scattering albedo':[],
'0':[],'1':[],'2':[],'3':[],'4':[],'5':[],'6':[],})
for n in range(0,len(wvls)):
layer_data=layer_data.append({'wavelength':wvls[n],
'extintion coeffient [km-1]':('{0:.7}'.format(np.array(AP_ml.ext).reshape((8,14,14,60,len(wvls)))[t,x,y,z,n])),
'single scattering albedo':('{0:.7}'.format(np.array(AP_ml.ssa).reshape((8,14,14,60,len(wvls)))[t,x,y,z,n])),
'0':('{0:.4f}'.format(AP_ml.g.values.reshape((8,14,14,60,len(wvls)))[t,x,y,z,n]**0)),
'1':('{0:.4f}'.format(AP_ml.g.values.reshape((8,14,14,60,len(wvls)))[t,x,y,z,n]**1)),
'2':('{0:.4f}'.format(AP_ml.g.values.reshape((8,14,14,60,len(wvls)))[t,x,y,z,n]**2)),
'3':('{0:.4f}'.format(AP_ml.g.values.reshape((8,14,14,60,len(wvls)))[t,x,y,z,n]**3)),
'4':('{0:.4f}'.format(AP_ml.g.values.reshape((8,14,14,60,len(wvls)))[t,x,y,z,n]**4)),
'5':('{0:.4f}'.format(AP_ml.g.values.reshape((8,14,14,60,len(wvls)))[t,x,y,z,n]**5)),
'6':('{0:.4f}'.format(AP_ml.g.values.reshape((8,14,14,60,len(wvls)))[t,x,y,z,n]**6))
},ignore_index=True)
layer_file=atmo_path+'Aerosol/layers/'+times[t].strftime('%Y%m%d:%H')+'lat_lon:'+str(x)+'_'+str(y)+'-z'+str(z)+'.LAYER'
layer_data.to_csv(layer_file, columns=['wavelength',
'extintion coeffient [km-1]',
'single scattering albedo',
'0',
'1',
'2',
'3',
'4',
'5',
'6'],sep=' ', encoding='utf-8', header=False,index=False)
aerosol_file=aerosol_file.append({'z(km)':('{0:.3f}'.format(c.z_mlvl.reshape((8,14,14,60))[t,x,y,z]/1000)),
'aer_layer': layer_file
},ignore_index=True)
aerosol_file.to_csv(atmo_path+'Aerosol/'+times[t].strftime('%Y%m%d:%H')+'lat_lon:'+str(x)+'_'+str(y),
columns=['z(km)','aer_layer'],
sep=' ', encoding='utf-8', header=False,index=False)
"""write the paths and input files for libradtran for each day and hs"""
input_file="/vols/satellite/home/bayer/libradtran/Libradtran-files/input_files/"+times[t].strftime('%Y%m%d:%H')+'lat_lon:'+str(x)+'_'+str(y)+".txt"
output_file="/vols/satellite/home/bayer/libradtran/Libradtran-files/output_files/"+times[t].strftime('%Y%m%d:%H')+'lat_lon:'+str(x)+'_'+str(y)+".txt"
f=open(input_file, "w+")
f.write
f.write("\nday_of_year "+times[t].strftime('%j')) # Correct for Earth-Sun distance
f.write("\ndata_files_path /home/nbayer/libRadtran-2.0.3/data/")
f.write("\natmosphere_file midlatitude_summer")
#f.write("\natmosphere_file "+atmo_file)
f.write("\nsource solar ../solar_flux/kurudz_0.1nm.dat per_nm ") #line identifies the location of the extraterrestrial solar flux file which defines the spectral resolution.
# f.write("\nmol_file O3 "+mol_file_o3+ ' mmr')
# f.write("\nmol_file NO2 "+mol_file_no2+ ' mmr')
# f.write("\nmol_file H2O "+mol_file_h2o+ ' mmr')
# f.write("\npressure "+str(c.cams_sfc.psfc[t,x,y].values/100))
f.write("\nsza "+str(c.sza.reshape((8,14,14))[t,x,y]))
# f.write("\nphi0 "+str(c.azi.reshape((8,14,14))[t,x,y]))
f.write("\nmol_abs_param sbdart #spectralcalculation resolver, should be the best option for UV Indax calculations")
f.write("\naerosol_default") #switch the use of aerosol data on
f.write("\naerosol_file explicit "+atmo_path+'Aerosol/'+times[t].strftime('%Y%m%d:%H')+'lat_lon:'+str(x)+'_'+str(y))
#f.write("\nck_lowtran_absorption O4 off")
# f.write("\nrte_solver disort")
# f.write("\nrte_solver fdisort2")
# f.write("\nrte_solver twostr")
# f.write("\nrte_solver rodents")
f.write("\nrte_solver polradtran^")
f.write("\ndisort_intcor moments")
# f.write("\nno_absorption mol")
# f.write("\nno_scattering mol")
f.write("\nwavelength "+str(min(wvls))+" "+ str(max(wvls))) #Wavelength range [nm]
f.write("\noutput_process per_nm")
f.write("\nverbose")
f.close()
Librad_path='/home/nbayer/libRadtran-2.0.3/bin/uvspec'
"""Running LibRadTran with the input_file frim the step above and saving it in the output_file"""
"""run the script from /home/nbayer/libRadtran-2.0.3/bin/ """
os.system(Librad_path+" < "+input_file+" > "+output_file)
else:
print('introduce lat and lon')
break
# for x in range(0,14):
# for y in range(0,14):
# """crating DataFrame for the atmospheric_file and gases_files"""
# data=pd.DataFrame({'z(km)':[],'p(mb)':[],'T(K)':[],'air(# * cm-3)':[]})
# ozone=pd.DataFrame({'z(km)':[],'O3 (mass mixing ratio) [kg kg^-1]':[]})
# no2=pd.DataFrame({'z(km)':[],'NO2 (mass mixing ratio) [kg kg^-1]':[]})
# h2o=pd.DataFrame({'z(km)':[],'H2O (mass mixing ratio) [kg kg^-1]':[]})
# for z in range(0,60):
# data=data.append({'z(km)':('{0:.3f}'.format(c.z_mlvl.reshape((8,14,14,60))[t,x,y,z]/1000)),
# 'p(mb)':('{0:.5f}'.format(c.P_mlvl.reshape((8,14,14,60))[t,x,y,z]/100)),
# 'T(K)':('{0:.3f}'.format(c.cams_ml.t[t,z,x,y].values)),
# 'air(# * cm-3)':('{0:.7}'.format((c.P_mlvl.reshape((8,14,14,60))[t,x,y,z]/1.e+6/c.cams_ml.t[t,z,x,y].values/SI.k)))
# },ignore_index=True)
# ozone=ozone.append({'z(km)':('{0:.3f}'.format(c.z_mlvl.reshape((8,14,14,60))[t,x,y,z]/1000)),
# 'O3 (mass mixing ratio) [kg kg^-1]':'{0:.7}'.format(c.cams_ml.go3[t,z,x,y].values)
# },ignore_index=True)
# no2=no2.append({'z(km)':('{0:.3f}'.format(c.z_mlvl.reshape((8,14,14,60))[t,x,y,z]/1000)),
# 'NO2 (mass mixing ratio) [kg kg^-1]':'{0:.7}'.format(c.cams_ml.no2[t,z,x,y].values)
# },ignore_index=True)
# h2o=h2o.append({'z(km)':('{0:.3f}'.format(c.z_mlvl.reshape((8,14,14,60))[t,x,y,z]/1000)),
# 'H2O (mass mixing ratio) [kg kg^-1]':'{0:.7}'.format(c.cams_ml.q[t,z,x,y].values/(1-c.cams_ml.q[t,z,x,y].values))
# },ignore_index=True)
# """ create path for saving the variables files"""
# atmo_file=atmo_path+times[t].strftime('%Y%m%d:%H')+'lat_lon:'+str(x)+'_'+str(y)+'.dat'
# mol_file_o3=atmo_path+times[t].strftime('%Y%m%d:%H')+'lat_lon:'+str(x)+'_'+str(y)+'-ozone.dat'
# mol_file_no2=atmo_path+times[t].strftime('%Y%m%d:%H')+'lat_lon:'+str(x)+'_'+str(y)+'-no2.dat'
# mol_file_h2o=atmo_path+times[t].strftime('%Y%m%d:%H')+'lat_lon:'+str(x)+'_'+str(y)+'-h2o.dat'
# """save the atmospheric data file as .dat"""
# data.to_csv(atmo_file, columns=['z(km)','p(mb)','T(K)','air(# * cm-3)'],
# sep=' ', encoding='utf-8', header=False,index=False)
# ozone.to_csv(mol_file_o3, columns=['z(km)','O3 (mass mixing ratio) [kg kg^-1]'],
# sep=' ', encoding='utf-8', header=False,index=False)
# no2.to_csv(mol_file_no2, columns=['z(km)','NO2 (mass mixing ratio) [kg kg^-1]'],
# sep=' ', encoding='utf-8', header=False,index=False)
# h2o.to_csv(mol_file_h2o, columns=['z(km)','H2O (mass mixing ratio) [kg kg^-1]'],
# sep=' ', encoding='utf-8', header=False,index=False)
# """crating DataFrame for the aerosol_file"""
# aerosol_file=pd.DataFrame({'z(km)':[],'aer_layer':[]})
# for z in range(0,60,2):
# layer_data=pd.DataFrame({'wavelength':[],'extintion coeffient [km-1]':[],'single scattering albedo':[],
# '0':[],'1':[],'2':[],'3':[],'4':[],'5':[],'6':[],})
# for n in range(0,len(wvls)):
# layer_data=layer_data.append({'wavelength':wvls[n],
# 'extintion coeffient [km-1]':('{0:.7}'.format(np.array(AP_ml.ext).reshape((8,14,14,60,len(wvls)))[t,x,y,z,n])),
# 'single scattering albedo':('{0:.7}'.format(np.array(AP_ml.ssa).reshape((8,14,14,60,len(wvls)))[t,x,y,z,n])),
# '0':('{0:.4f}'.format(AP_ml.g.values.reshape((8,14,14,60,len(wvls)))[t,x,y,z,n]**0)),
# '1':('{0:.4f}'.format(AP_ml.g.values.reshape((8,14,14,60,len(wvls)))[t,x,y,z,n]**1)),
# '2':('{0:.4f}'.format(AP_ml.g.values.reshape((8,14,14,60,len(wvls)))[t,x,y,z,n]**2)),
# '3':('{0:.4f}'.format(AP_ml.g.values.reshape((8,14,14,60,len(wvls)))[t,x,y,z,n]**3)),
# '4':('{0:.4f}'.format(AP_ml.g.values.reshape((8,14,14,60,len(wvls)))[t,x,y,z,n]**4)),
# '5':('{0:.4f}'.format(AP_ml.g.values.reshape((8,14,14,60,len(wvls)))[t,x,y,z,n]**5)),
# '6':('{0:.4f}'.format(AP_ml.g.values.reshape((8,14,14,60,len(wvls)))[t,x,y,z,n]**6))
# },ignore_index=True)
# layer_file=atmo_path+'Aerosol/layers/'+times[t].strftime('%Y%m%d:%H')+'lat_lon:'+str(x)+'_'+str(y)+'-z'+str(z)+'.LAYER'
# layer_data.to_csv(layer_file, columns=['wavelength',
# 'extintion coeffient [km-1]',
# 'single scattering albedo',
# '0',
# '1',
# '2',
# '3',
# '4',
# '5',
# '6'],sep=' ', encoding='utf-8', header=False,index=False)
# aerosol_file=aerosol_file.append({'z(km)':('{0:.3f}'.format(c.z_mlvl.reshape((8,14,14,60))[t,x,y,z]/1000)),
# 'aer_layer': layer_file
# },ignore_index=True)
# aerosol_file.to_csv(atmo_path+'Aerosol/'+times[t].strftime('%Y%m%d:%H')+'lat_lon:'+str(x)+'_'+str(y),
# columns=['z(km)','aer_layer'],
# sep=' ', encoding='utf-8', header=False,index=False)
# """write the paths and input files for libradtran for each day and hs"""
# input_file="/vols/satellite/home/bayer/libradtran/Libradtran-files/input_files/"+times[t].strftime('%Y%m%d:%H')+'lat_lon:'+str(x)+'_'+str(y)+".txt"
# output_file="/vols/satellite/home/bayer/libradtran/Libradtran-files/output_files/"+times[t].strftime('%Y%m%d:%H')+'lat_lon:'+str(x)+'_'+str(y)+".txt"
# f=open(input_file, "w+")
# f.write
# f.write("\ndata_files_path /home/nbayer/libRadtran-2.0.3/data/")
# f.write("\natmosphere_file "+atmo_file)
# f.write("\nsource solar ../solar_flux/kurudz_0.1nm.dat per_nm ") #line identifies the location of the extraterrestrial solar flux file which defines the spectral resolution.
# f.write("\nmol_file O3 "+mol_file_o3+ ' mmr')
# f.write("\nmol_file NO2 "+mol_file_no2+ ' mmr')
# f.write("\nmol_file H2O "+mol_file_h2o+ ' mmr')
# f.write("\npressure "+str(c.cams_sfc.psfc[t,x,y].values/100))
# f.write("\nsza "+str(c.sza.reshape((8,14,14))[t,x,y]))
# # f.write("\nphi0 "+str(c.azi.reshape((8,14,14))[t,x,y]))
# f.write("\nmol_abs_param sbdart #spectralcalculation resolver, should be the best option for UV Indax calculations")
# f.write("\naerosol_default") #switch the use of aerosol data on
# f.write("\naerosol_file explicit "+atmo_path+'Aerosol/'+times[t].strftime('%Y%m%d:%H')+'lat_lon:'+str(x)+'_'+str(y))
# #f.write("\nck_lowtran_absorption O4 off")
# f.write("\nrte_solver disort")
# f.write("\ndisort_intcor moments")
# # f.write("\nno_absorption mol")
# # f.write("\nno_scattering mol")
# f.write("\nwavelength "+str(min(wvls))+" "+ str(max(wvls))) #Wavelength range [nm]
# f.write("\noutput_process per_nm")
# f.write("\nverbose")
# f.close()
# Librad_path='/home/nbayer/libRadtran-2.0.3/bin/uvspec'
# """Running LibRadTran with the input_file frim the step above and saving it in the output_file"""
# """run the script from /home/nbayer/libRadtran-2.0.3/bin/ """
# os.system(Librad_path+" < "+input_file+" > "+output_file)
if os.stat(output_file).st_size>100:
"""Reading the output_file and dividing the columns in variables"""
data_out=np.loadtxt(output_file,dtype=float)
"""calculating and adding in the array the total irradiance values"""
data_out=np.c_[data_out,data_out[:,1]+data_out[:,2],(data_out[:,4]+data_out[:,5]+data_out[:,6])]
data_out=np.c_[data_out,np.ones(len(data_out)),np.ones(len(data_out))]
for f in range(0,len(data_out)):
if data_out[f,0]<=298:
data_out[f,9]=1
data_out[f,10]=data_out[f,9]*(data_out[f,1]+data_out[f,2])
elif int(data_out[f,0]) in range(299,328):
data_out[f,9]=10**(0.094*(298-int(data_out[f,0])))
data_out[f,10]=data_out[f,9]*(data_out[f,1]+data_out[f,2])
elif data_out[f,0] in range(328,401):
data_out[f,9]=10**(0.015*(139-int(data_out[f,0])))
data_out[f,10]=data_out[f,9]*(data_out[f,1]+data_out[f,2])
else:
data_out[f,9]=0
data_out[f,10]=data_out[f,9]*(data_out[f,1]+data_out[f,2])
UVI=UVI.append({'Date':times[t].strftime('%Y%m%d:%H'),'UVI': (np.sum(data_out[:,10])/25)},
ignore_index=True)
# plt.step(data_out[:,0],data_out[:,7], label='Total downward irradiance '+times[t].strftime('%H'))
plt.plot(data_out[:,0],data_out[:,7], label='Total downward irradiance '+times[t].strftime('%H'))
# plt.step(data_out[:,0],data_out[:,1], label='Direct irradiance '+times[t].strftime('%H'))
plt.plot(data_out[:,0],data_out[:,1], label='Direct irradiance '+times[t].strftime('%H'))
plt.legend()
plt.title(times[t].strftime('%Y%m%d:%H'))
# plt.show()
plt.savefig(times[t].strftime('%Y%m%d:%H')+ '_plot2.png', dpi=300)
plt.close()
# data[:,1].plot(title=str(output_file[47:60]),xlim=[280,500],ylim=[0,2000])
else:
UVI=UVI.append({'Date':times[t].strftime('%Y%m%d:%H'),'UVI': 'NaN'},
ignore_index=True)
print(UVI)
"""read netCDF"""
nc = nc4.Dataset('/vols/satellite/home/bayer/uv/netCDF/20190723.nc','r')
time=nc.variables['time'][:]
time=num2date(time[:],units='seconds since 1970-01-01T00:00:00')
spect=np.array(nc.variables['spect'][:]*1000)
uvind=np.array(nc.variables['uvind'][:])
nc=xr.open_dataset('/vols/satellite/home/bayer/uv/netCDF/20190723.nc')
plt.plot(np.arange(290, 400.01, 0.1),nc.spect[6,:])
for p in range(0,len(time)):
if time[p].strftime('%H%M')=='1200':
break
fig=plt.figure()
# datetime.fromtimetamp(nc.variables['time'][195]).strftime("%B %d, %Y %I:%M:%S")
plt.plot(spect[p,0:400],'r',label='Melpitz '+time[p].strftime('%m-%d %H:%M'))
plt.plot(data_out[:,0],data_out[:,1],'b', label='Direct irradiance [Libradtran]')
plt.plot(data_out[:,0],data_out[:,1]+data_out[:,2],'g', label='Global irradiance [Libradtran]')
plt.plot([], [], ' ',label='UV-Index [Libradtran]= '+str(round(UVI['UVI'][len(UVI)-1],2)))
plt.plot([], [], ' ',label='UV-Index [Spectrometer]= '+str(round(uvind[p],2)))
# plt.xlim(501)
plt.legend()
plt.grid()
plt.title(time[p].strftime("%B %d, %Y %I:%M:%S"))
# plt.xticks(280,500)
plt.show()
| 61.695214
| 194
| 0.505083
| 3,475
| 24,493
| 3.453237
| 0.117986
| 0.030333
| 0.031667
| 0.038
| 0.77525
| 0.763833
| 0.74725
| 0.736167
| 0.724667
| 0.716833
| 0
| 0.052811
| 0.292614
| 24,493
| 396
| 195
| 61.85101
| 0.63979
| 0.387417
| 0
| 0.131148
| 0
| 0.010929
| 0.198965
| 0.046802
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.054645
| 0
| 0.054645
| 0.010929
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a87237299e4344ef672744f80162c90707dcd9b8
| 117
|
py
|
Python
|
ide/fuente/teoserver/api/admin.py
|
jossehblanco/ProgramacionVisual
|
59354dc3c9448c997687420fceb179a76378e9de
|
[
"MIT"
] | 1
|
2021-02-26T19:54:42.000Z
|
2021-02-26T19:54:42.000Z
|
ide/objeto/teoserver/api/admin.py
|
jossehblanco/ProgramacionVisual
|
59354dc3c9448c997687420fceb179a76378e9de
|
[
"MIT"
] | null | null | null |
ide/objeto/teoserver/api/admin.py
|
jossehblanco/ProgramacionVisual
|
59354dc3c9448c997687420fceb179a76378e9de
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Params
# Register your models here.
admin.site.register(Params)
| 23.4
| 32
| 0.811966
| 17
| 117
| 5.588235
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119658
| 117
| 5
| 33
| 23.4
| 0.92233
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a8894248ec7affad658e52eef09297d301877de5
| 158
|
py
|
Python
|
utils/PredictiveModels/__init__.py
|
vd1371/GIAMS
|
dd6551f344b8d0377131d4496846eb5d03b6189c
|
[
"MIT"
] | null | null | null |
utils/PredictiveModels/__init__.py
|
vd1371/GIAMS
|
dd6551f344b8d0377131d4496846eb5d03b6189c
|
[
"MIT"
] | null | null | null |
utils/PredictiveModels/__init__.py
|
vd1371/GIAMS
|
dd6551f344b8d0377131d4496846eb5d03b6189c
|
[
"MIT"
] | null | null | null |
from .ExponentialGrowth import Exponential
from .GBM import GBM
from .Linear import Linear
from .PowerGrowth import Power
from .WienerDrift import WienerDrift
| 31.6
| 42
| 0.848101
| 20
| 158
| 6.7
| 0.45
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120253
| 158
| 5
| 43
| 31.6
| 0.964029
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a88dc5b4cd64ffaa5a70afb759ece5d5b7eb3d1c
| 52
|
py
|
Python
|
twilight/menus/__init__.py
|
Just-Jojo/Twilight-bot
|
1256e9568b7d05e60fb9697df950435de72add38
|
[
"MIT"
] | null | null | null |
twilight/menus/__init__.py
|
Just-Jojo/Twilight-bot
|
1256e9568b7d05e60fb9697df950435de72add38
|
[
"MIT"
] | 8
|
2020-11-17T04:57:17.000Z
|
2021-03-19T22:25:49.000Z
|
twilight/menus/__init__.py
|
Just-Jojo/Twilight-bot
|
1256e9568b7d05e60fb9697df950435de72add38
|
[
"MIT"
] | 2
|
2021-01-05T22:32:45.000Z
|
2021-02-02T13:39:23.000Z
|
from .menus import TwilightMenu, TwilightPageSource
| 26
| 51
| 0.865385
| 5
| 52
| 9
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096154
| 52
| 1
| 52
| 52
| 0.957447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a8c32348cd35c9cd94aa00a18efdd4bba13b75d8
| 84
|
py
|
Python
|
test_macro/cases/__init__.py
|
kerryeon/test-macro
|
a65f12d7f6f1a679070e974f2abacfed7634c2c6
|
[
"MIT"
] | null | null | null |
test_macro/cases/__init__.py
|
kerryeon/test-macro
|
a65f12d7f6f1a679070e974f2abacfed7634c2c6
|
[
"MIT"
] | null | null | null |
test_macro/cases/__init__.py
|
kerryeon/test-macro
|
a65f12d7f6f1a679070e974f2abacfed7634c2c6
|
[
"MIT"
] | null | null | null |
from .case import MacroCase
from .file import MacroFile
from .yaml import MacroYAML
| 21
| 27
| 0.821429
| 12
| 84
| 5.75
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 84
| 3
| 28
| 28
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a8ce307765f1475d8d9e00d7eaa9d12cdfc8b928
| 3,010
|
py
|
Python
|
runtime/bindings/python/src/openvino/__init__.py
|
fengyisun/openvino
|
661d4363251f40dcda805765ac52914151954e12
|
[
"Apache-2.0"
] | 1
|
2020-09-28T08:56:20.000Z
|
2020-09-28T08:56:20.000Z
|
runtime/bindings/python/src/openvino/__init__.py
|
fengyisun/openvino
|
661d4363251f40dcda805765ac52914151954e12
|
[
"Apache-2.0"
] | 34
|
2020-11-19T13:15:42.000Z
|
2022-02-21T13:13:02.000Z
|
runtime/bindings/python/src/openvino/__init__.py
|
sbalandi/openvino
|
519951a4a9f979c1b04529dda821111c56113716
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""openvino module namespace, exposing factory functions for all ops and other classes."""
# noqa: F401
from pkg_resources import get_distribution, DistributionNotFound
__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore # mypy issue #1422
try:
__version__ = get_distribution("openvino-core").version
except DistributionNotFound:
__version__ = "0.0.0.dev0"
from openvino.ie_api import BlobWrapper
from openvino.ie_api import infer
from openvino.ie_api import async_infer
from openvino.ie_api import get_result
from openvino.ie_api import blob_from_file
from openvino.impl import Dimension
from openvino.impl import Function
from openvino.impl import Node
from openvino.impl import PartialShape
from openvino.impl import Layout
from openvino.pyopenvino import Core
from openvino.pyopenvino import IENetwork
from openvino.pyopenvino import ExecutableNetwork
from openvino.pyopenvino import Version
from openvino.pyopenvino import Parameter
from openvino.pyopenvino import InputInfoPtr
from openvino.pyopenvino import InputInfoCPtr
from openvino.pyopenvino import DataPtr
from openvino.pyopenvino import TensorDesc
from openvino.pyopenvino import get_version
from openvino.pyopenvino import StatusCode
from openvino.pyopenvino import InferQueue
from openvino.pyopenvino import InferRequest # TODO: move to ie_api?
from openvino.pyopenvino import Blob
from openvino.pyopenvino import PreProcessInfo
from openvino.pyopenvino import MeanVariant
from openvino.pyopenvino import ResizeAlgorithm
from openvino.pyopenvino import ColorFormat
from openvino.pyopenvino import PreProcessChannel
from openvino.pyopenvino import Tensor
from openvino import opset1
from openvino import opset2
from openvino import opset3
from openvino import opset4
from openvino import opset5
from openvino import opset6
from openvino import opset7
from openvino import opset8
# Extend Node class to support binary operators
Node.__add__ = opset8.add
Node.__sub__ = opset8.subtract
Node.__mul__ = opset8.multiply
Node.__div__ = opset8.divide
Node.__truediv__ = opset8.divide
Node.__radd__ = lambda left, right: opset8.add(right, left)
Node.__rsub__ = lambda left, right: opset8.subtract(right, left)
Node.__rmul__ = lambda left, right: opset8.multiply(right, left)
Node.__rdiv__ = lambda left, right: opset8.divide(right, left)
Node.__rtruediv__ = lambda left, right: opset8.divide(right, left)
Node.__eq__ = opset8.equal
Node.__ne__ = opset8.not_equal
Node.__lt__ = opset8.less
Node.__le__ = opset8.less_equal
Node.__gt__ = opset8.greater
Node.__ge__ = opset8.greater_equal
# Patching for Blob class
# flake8: noqa: F811
# this class will be removed
Blob = BlobWrapper
# Patching ExecutableNetwork
ExecutableNetwork.infer = infer
# Patching InferRequest
InferRequest.infer = infer
InferRequest.async_infer = async_infer
InferRequest.get_result = get_result
# Patching InferQueue
InferQueue.async_infer = async_infer
| 34.204545
| 100
| 0.827575
| 395
| 3,010
| 6.022785
| 0.316456
| 0.191677
| 0.184952
| 0.235393
| 0.115595
| 0.057167
| 0.033628
| 0.033628
| 0
| 0
| 0
| 0.016886
| 0.114618
| 3,010
| 87
| 101
| 34.597701
| 0.875797
| 0.135216
| 0
| 0
| 0
| 0
| 0.01161
| 0
| 0
| 0
| 0
| 0.011494
| 0
| 1
| 0
| false
| 0
| 0.606061
| 0
| 0.606061
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a8ce586c71e9e694edeb9e547d6c6408c0e4ed8a
| 19
|
py
|
Python
|
venv/lib/python3.8/site-packages/django_on_heroku/__init__.py
|
Joshua-Barawa/My-Photos
|
adcaea48149c6b31e9559b045709d538d0b749bc
|
[
"PostgreSQL",
"Unlicense"
] | 498
|
2017-12-11T16:31:26.000Z
|
2022-03-08T06:35:40.000Z
|
venv/lib/python3.8/site-packages/django_on_heroku/__init__.py
|
Joshua-Barawa/My-Photos
|
adcaea48149c6b31e9559b045709d538d0b749bc
|
[
"PostgreSQL",
"Unlicense"
] | 40
|
2017-12-11T20:51:07.000Z
|
2019-09-30T20:19:21.000Z
|
venv/lib/python3.8/site-packages/django_on_heroku/__init__.py
|
Joshua-Barawa/My-Photos
|
adcaea48149c6b31e9559b045709d538d0b749bc
|
[
"PostgreSQL",
"Unlicense"
] | 183
|
2017-12-11T17:43:18.000Z
|
2022-03-31T04:10:11.000Z
|
from .core import *
| 19
| 19
| 0.736842
| 3
| 19
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 19
| 1
| 19
| 19
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
7641580a71a66e631c70f95bb9379d380beef2a8
| 835
|
py
|
Python
|
nqlib/linalg.py
|
knttnk/NQLib
|
318e244ce28b4e72ef7b676392182bb20cf62145
|
[
"MIT"
] | 2
|
2021-10-29T20:17:07.000Z
|
2022-01-11T09:38:07.000Z
|
nqlib/linalg.py
|
knttnk/NQLib
|
318e244ce28b4e72ef7b676392182bb20cf62145
|
[
"MIT"
] | null | null | null |
nqlib/linalg.py
|
knttnk/NQLib
|
318e244ce28b4e72ef7b676392182bb20cf62145
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy.sparse.linalg
np.set_printoptions(precision=5, suppress=True)
array = np.array
def matrix(M) -> np.ndarray:
return np.array(M, ndmin=2)
def kron(A, B) -> np.ndarray:
return matrix(np.kron(A, B))
def block(M) -> np.ndarray:
return matrix(np.block(M))
def eye(N, M=None) -> np.ndarray:
return matrix(np.eye(N, M))
def norm(A: np.ndarray) -> float:
return np.linalg.norm(A, ord=np.inf)
def zeros(shape) -> np.ndarray:
return matrix(np.zeros(shape))
def ones(shape) -> np.ndarray:
return matrix(np.ones(shape))
def pinv(a: np.ndarray) -> np.ndarray:
return matrix(np.linalg.pinv(a))
def eig_max(A) -> float:
return max(abs(np.linalg.eig(A)[0]))
def mpow(A: np.ndarray, x) -> np.ndarray:
return matrix(scipy.linalg.fractional_matrix_power(A, x))
| 17.765957
| 61
| 0.65988
| 141
| 835
| 3.879433
| 0.312057
| 0.180987
| 0.219378
| 0.268739
| 0.270567
| 0.102377
| 0
| 0
| 0
| 0
| 0
| 0.004342
| 0.172455
| 835
| 46
| 62
| 18.152174
| 0.787265
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.416667
| false
| 0
| 0.083333
| 0.416667
| 0.916667
| 0.041667
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
7650247673d216a7fd49386ad4fe4404ab9a9e5e
| 304
|
py
|
Python
|
storm/defaults.py
|
novagodb/storm
|
1a3624d6fe157cb68a948adc3b6273db6ab06ce7
|
[
"MIT"
] | 2,293
|
2015-01-07T23:38:25.000Z
|
2022-03-30T20:37:56.000Z
|
storm/defaults.py
|
novagodb/storm
|
1a3624d6fe157cb68a948adc3b6273db6ab06ce7
|
[
"MIT"
] | 71
|
2015-01-20T09:01:33.000Z
|
2021-12-07T00:24:59.000Z
|
Lib/site-packages/storm/defaults.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | 167
|
2015-01-08T01:48:53.000Z
|
2022-02-26T07:41:25.000Z
|
# -*- coding: utf-8 -*-
import getpass
DEFAULT_PORT = 22
DEFAULT_USER = getpass.getuser()
def get_default(key, defaults={}):
if key == 'port':
return defaults.get("port", DEFAULT_PORT)
if key == 'user':
return defaults.get("user", DEFAULT_USER)
return defaults.get(key)
| 17.882353
| 49
| 0.631579
| 39
| 304
| 4.794872
| 0.410256
| 0.224599
| 0.272727
| 0.224599
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012605
| 0.217105
| 304
| 17
| 50
| 17.882353
| 0.773109
| 0.069079
| 0
| 0
| 0
| 0
| 0.056738
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0.222222
| 0.111111
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
7684adde2b3af805d143f9189578ba7ea5feeb4b
| 5,043
|
py
|
Python
|
simulation/test_estimator_templates.py
|
zhouyifan233/bayou
|
2401633c9329dc79fd93d043aea1b3514bf48f6a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
simulation/test_estimator_templates.py
|
zhouyifan233/bayou
|
2401633c9329dc79fd93d043aea1b3514bf48f6a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
simulation/test_estimator_templates.py
|
zhouyifan233/bayou
|
2401633c9329dc79fd93d043aea1b3514bf48f6a
|
[
"BSD-3-Clause-Clear"
] | 1
|
2020-03-06T16:08:12.000Z
|
2020-03-06T16:08:12.000Z
|
import numpy as np
from emgpb2.states import Gaussian, GaussianSequence, GMM, GMMSequence
from emgpb2.models import LinearModel, ConstantVelocity, RandomWalk
from emgpb2.EM import SKFEstimator
from emgpb2.EM import LinearGaussianEstimator
# The EM for one Linear Gaussian model.
# Estimate parameters of one Kalman filter.
# Constant Velocity Model.
def test_lg_cv_estimator(init_P=5.0, q=0.5, r=1.0,
state_dim=4, obs_dim=2,
input_measurement='data/measurement1.csv',
verbose=True):
initial_state = Gaussian(np.zeros([state_dim, 1]), (init_P ** 2) * np.eye(state_dim))
initial_model = ConstantVelocity(dt=1.0, q=q, r=r, state_dim=state_dim, obs_dim=obs_dim)
if isinstance(input_measurement, str):
measurements = np.loadtxt(input_measurement, delimiter=',')
else:
measurements = input_measurement
if measurements.ndim == 2:
measurements = np.expand_dims(measurements, axis=-1)
sequence = GaussianSequence(measurements, initial_state)
dataset = [sequence]
model, dataset, LLs = LinearGaussianEstimator.EM(dataset, initial_model,
max_iters=300, threshold=1e-6,
learn_H=True, learn_R=True,
learn_A=True, learn_Q=True, learn_init_state=True,
keep_Q_structure=False, diagonal_Q=False, verbose=verbose)
return model, LLs
# The EM for GPB2.
# Estimate parameters of two Kalman filters.
# Two different Constant Velocity Models.
def test_skf_cv_estimator(init_P: list = [5.0, 5.0], q: list = [2.0, 10.0], r: list = [1.0, 1.0],
state_dim=4, obs_dim=2,
input_measurement='data/measurement2.csv',
verbose=True):
"""
"""
# read measurement data
if isinstance(input_measurement, str):
measurements = np.loadtxt(input_measurement, delimiter=',')
else:
measurements = input_measurement
if measurements.ndim == 2:
measurements = np.expand_dims(measurements, axis=-1)
# Initial state of measurements
num_of_models = len(q)
gaussian_models = []
for i in range(num_of_models):
gaussian_models.append(Gaussian(np.zeros([state_dim, 1]), (init_P[i] ** 2) * np.eye(state_dim)))
initial_gmm_state = GMM(gaussian_models)
# measurement sequence
gmmsequence = GMMSequence(measurements, initial_gmm_state)
dataset = [gmmsequence]
# Initial models
constantvelocity_models = []
for i in range(num_of_models):
constantvelocity_models.append(ConstantVelocity(dt=1.0, q=q[i], r=r[i], state_dim=state_dim, obs_dim=obs_dim))
# Switching matrix
Z = np.ones((2, 2)) / 2
models_all, Z_all, dataset, LLs = SKFEstimator.EM(dataset, constantvelocity_models, Z,
max_iters=300, threshold=1e-6, learn_H=True, learn_R=True,
learn_A=True, learn_Q=True, learn_init_state=False, learn_Z=True,
diagonal_Q=False, wishart_prior=False, verbose=verbose)
return models_all, LLs
# The EM for GPB2.
# Estimate parameters of two Kalman filters.
# Two different Random Walk Models.
def test_skf_rw_estimator(init_P: list = [5.0, 5.0], q: list = [1.0, 20.0], r: list = [2.0, 2.0],
state_dim=2, input_measurement='data/measurement3.csv',
verbose=True):
"""
"""
# read measurement data
if isinstance(input_measurement, str):
measurements = np.loadtxt(input_measurement, delimiter=',')
else:
measurements = input_measurement
if measurements.ndim == 2:
measurements = np.expand_dims(measurements, axis=-1)
# Initial state of measurements
num_of_models = len(q)
gaussian_models = []
for i in range(num_of_models):
gaussian_models.append(Gaussian(np.zeros([state_dim, 1]), (init_P[i] ** 2) * np.eye(state_dim)))
initial_gmm_state = GMM(gaussian_models)
# measurement sequence
gmmsequence = GMMSequence(measurements, initial_gmm_state)
dataset = [gmmsequence]
# Initial models
constantvelocity_models = []
for i in range(num_of_models):
constantvelocity_models.append(RandomWalk(q=q[i], r=r[i], state_dim=state_dim))
# Switching matrix
Z = np.ones((2, 2)) / 2
models_all, Z_all, dataset, LLs = SKFEstimator.EM(dataset, constantvelocity_models, Z,
max_iters=300, threshold=1e-6, learn_H=True, learn_R=True,
learn_A=True, learn_Q=True, learn_init_state=False, learn_Z=True,
diagonal_Q=False, wishart_prior=False, verbose=verbose)
return models_all, LLs
| 43.852174
| 118
| 0.611937
| 613
| 5,043
| 4.833605
| 0.184339
| 0.040499
| 0.022275
| 0.0162
| 0.771178
| 0.763078
| 0.741816
| 0.741816
| 0.719541
| 0.719541
| 0
| 0.023146
| 0.288915
| 5,043
| 114
| 119
| 44.236842
| 0.803123
| 0.100734
| 0
| 0.697368
| 0
| 0
| 0.014699
| 0.014031
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039474
| false
| 0
| 0.065789
| 0
| 0.144737
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
76ccac99fd441fc5c438c07b5ba4357b1133c8e5
| 199
|
py
|
Python
|
tests/test_utils.py
|
shibli049/expynent
|
6d68aeaa3cd0dde38505b8430fe1e4f9864fe53c
|
[
"BSD-3-Clause"
] | 438
|
2016-10-21T16:13:56.000Z
|
2022-03-26T10:41:40.000Z
|
tests/test_utils.py
|
shibli049/expynent
|
6d68aeaa3cd0dde38505b8430fe1e4f9864fe53c
|
[
"BSD-3-Clause"
] | 72
|
2016-10-21T19:18:52.000Z
|
2021-06-21T11:46:07.000Z
|
tests/test_utils.py
|
shibli049/expynent
|
6d68aeaa3cd0dde38505b8430fe1e4f9864fe53c
|
[
"BSD-3-Clause"
] | 77
|
2016-10-21T22:02:02.000Z
|
2021-08-23T20:23:08.000Z
|
from expynent.shortcuts import is_private
def test_is_private():
private_attr = '_IP_CUSTOM'
public_attr = 'IPv6'
assert is_private(private_attr)
assert not is_private(public_attr)
| 22.111111
| 41
| 0.753769
| 28
| 199
| 4.964286
| 0.535714
| 0.258993
| 0.230216
| 0.28777
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006098
| 0.175879
| 199
| 8
| 42
| 24.875
| 0.841463
| 0
| 0
| 0
| 0
| 0
| 0.070352
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4f0fd494ce72e5567f7a0eb56f2597aa621cb1c4
| 76
|
py
|
Python
|
mymusichere/__main__.py
|
dmitrvk/mymusichere-app
|
02a6d5f60a72197e08c98da59b0ef7e7168dcf4b
|
[
"MIT"
] | null | null | null |
mymusichere/__main__.py
|
dmitrvk/mymusichere-app
|
02a6d5f60a72197e08c98da59b0ef7e7168dcf4b
|
[
"MIT"
] | 14
|
2020-06-06T19:08:03.000Z
|
2020-12-03T12:07:04.000Z
|
mymusichere/__main__.py
|
dmitrvk/mymusichere-app
|
02a6d5f60a72197e08c98da59b0ef7e7168dcf4b
|
[
"MIT"
] | null | null | null |
# Licensed under the MIT License
from mymusichere import main
main.main()
| 12.666667
| 32
| 0.776316
| 11
| 76
| 5.363636
| 0.818182
| 0.271186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171053
| 76
| 5
| 33
| 15.2
| 0.936508
| 0.394737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4f29e5d70d8f5c09884e50e0dc66be9b763af694
| 30
|
py
|
Python
|
hdlogger/serializers/tracers/coolpkg/__init__.py
|
incognitoRepo/hdlogger
|
c738161ef3144469ba0f47caf89770613031e96e
|
[
"BSD-2-Clause"
] | null | null | null |
hdlogger/serializers/tracers/coolpkg/__init__.py
|
incognitoRepo/hdlogger
|
c738161ef3144469ba0f47caf89770613031e96e
|
[
"BSD-2-Clause"
] | null | null | null |
hdlogger/serializers/tracers/coolpkg/__init__.py
|
incognitoRepo/hdlogger
|
c738161ef3144469ba0f47caf89770613031e96e
|
[
"BSD-2-Clause"
] | null | null | null |
from .core import (
gen2d
)
| 7.5
| 19
| 0.633333
| 4
| 30
| 4.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.266667
| 30
| 3
| 20
| 10
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4f3b485946076fab06f5d57fd8095d35a63204df
| 122
|
py
|
Python
|
aritmetica.py
|
alimadeoliveiranatalia/Python
|
e80b0a32416a6b46512518c8c9fa5a08950860cf
|
[
"MIT"
] | null | null | null |
aritmetica.py
|
alimadeoliveiranatalia/Python
|
e80b0a32416a6b46512518c8c9fa5a08950860cf
|
[
"MIT"
] | null | null | null |
aritmetica.py
|
alimadeoliveiranatalia/Python
|
e80b0a32416a6b46512518c8c9fa5a08950860cf
|
[
"MIT"
] | null | null | null |
x=5
y=2
print('Soma')
print(x+y)
print('subtraçao')
print(x-y)
print('Multiplição')
print(x*y)
print('Divisão')
print(x/y)
| 12.2
| 20
| 0.680328
| 24
| 122
| 3.458333
| 0.375
| 0.289157
| 0.337349
| 0.433735
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017699
| 0.07377
| 122
| 10
| 21
| 12.2
| 0.716814
| 0
| 0
| 0
| 0
| 0
| 0.252033
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.8
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
4f463efbc3a4a1b3f4ac2ac7b1b636d7c261de74
| 72
|
py
|
Python
|
luciani/algorithms/physical_connectivity.py
|
mastrogiovanni/rmi-luciani
|
51efd07ac61660438b11c9d877967f454240d0c1
|
[
"Apache-2.0"
] | null | null | null |
luciani/algorithms/physical_connectivity.py
|
mastrogiovanni/rmi-luciani
|
51efd07ac61660438b11c9d877967f454240d0c1
|
[
"Apache-2.0"
] | null | null | null |
luciani/algorithms/physical_connectivity.py
|
mastrogiovanni/rmi-luciani
|
51efd07ac61660438b11c9d877967f454240d0c1
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import scipy as sc
import pandas as pd
import bct
| 9
| 19
| 0.763889
| 14
| 72
| 3.928571
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.236111
| 72
| 7
| 20
| 10.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4f5fa2dca86354f1733af436780aa4a2cc4c31e5
| 113
|
py
|
Python
|
web3tools/__init__.py
|
zepcp/web3tools
|
71dd90518032596549859e1f03c65db508f3f406
|
[
"MIT"
] | null | null | null |
web3tools/__init__.py
|
zepcp/web3tools
|
71dd90518032596549859e1f03c65db508f3f406
|
[
"MIT"
] | null | null | null |
web3tools/__init__.py
|
zepcp/web3tools
|
71dd90518032596549859e1f03c65db508f3f406
|
[
"MIT"
] | 1
|
2022-03-24T09:57:40.000Z
|
2022-03-24T09:57:40.000Z
|
from .ewt import *
from .providers import *
from .reader import *
from .transactor import *
from .utils import *
| 18.833333
| 25
| 0.734513
| 15
| 113
| 5.533333
| 0.466667
| 0.481928
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176991
| 113
| 5
| 26
| 22.6
| 0.892473
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4f63a9d6771929176cb6dcf9fc80f0fe478d85fa
| 47,467
|
py
|
Python
|
venv/Lib/site-packages/github/tests/IssueEvent.py
|
adamlkl/GithubDataVisualisation
|
94dbdb3411fd41e325b03f17e171509fb64c8696
|
[
"MIT"
] | 2
|
2018-10-04T06:12:38.000Z
|
2021-08-02T16:39:12.000Z
|
venv/Lib/site-packages/github/tests/IssueEvent.py
|
adamlkl/GithubDataVisualisation
|
94dbdb3411fd41e325b03f17e171509fb64c8696
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/github/tests/IssueEvent.py
|
adamlkl/GithubDataVisualisation
|
94dbdb3411fd41e325b03f17e171509fb64c8696
|
[
"MIT"
] | 1
|
2021-11-05T22:16:58.000Z
|
2021-11-05T22:16:58.000Z
|
# -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Simon <spam@esemi.ru> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from . import Framework
import datetime
class IssueEvent(Framework.TestCase):
def setUp(self):
    """Fetch one recorded issue event of each type exercised by the tests below.

    NOTE(review): Framework.TestCase replays pre-recorded HTTP exchanges, so
    the order of these get_issues_event calls presumably must match the
    recording — do not reorder.
    """
    Framework.TestCase.setUp(self)
    # lazy=True: no request is made for the repo itself, only for the events.
    repo = self.g.get_repo("PyGithub/PyGithub", lazy=True)
    # From Issue #30
    self.event_subscribed = repo.get_issues_event(16347479)
    self.event_assigned = repo.get_issues_event(16347480)
    self.event_referenced = repo.get_issues_event(16348656)
    self.event_closed = repo.get_issues_event(16351220)
    self.event_labeled = repo.get_issues_event(98136337)
    # From Issue 538
    self.event_mentioned = repo.get_issues_event(1009034767)
    self.event_merged = repo.get_issues_event(1015402964)
    self.event_review_requested = repo.get_issues_event(1011101309)
    # From Issue 857
    self.event_reopened = repo.get_issues_event(1782463023)
    self.event_unassigned = repo.get_issues_event(1782463379)
    self.event_unlabeled = repo.get_issues_event(1782463917)
    self.event_renamed = repo.get_issues_event(1782472556)
    self.event_base_ref_changed = repo.get_issues_event(1782915693)
    self.event_head_ref_deleted = repo.get_issues_event(1782917185)
    self.event_head_ref_restored = repo.get_issues_event(1782917299)
    self.event_milestoned = repo.get_issues_event(1783596418)
    self.event_demilestoned = repo.get_issues_event(1783596452)
    self.event_locked = repo.get_issues_event(1783596743)
    self.event_unlocked = repo.get_issues_event(1783596818)
    self.event_review_dismissed = repo.get_issues_event(1783605084)
    self.event_review_request_removed = repo.get_issues_event(1783779835)
    self.event_marked_as_duplicate = repo.get_issues_event(1783779725)
    self.event_unmarked_as_duplicate = repo.get_issues_event(1789228962)
    self.event_added_to_project = repo.get_issues_event(1791766828)
    self.event_moved_columns_in_project = repo.get_issues_event(1791767766)
    self.event_removed_from_project = repo.get_issues_event(1791768212)
    # From Issue 866
    self.event_converted_note_to_issue = repo.get_issues_event(1791769149)
def testEvent_subscribed_Attributes(self):
    """Verify every attribute of the recorded "subscribed" issue event."""
    ev = self.event_subscribed
    self.assertEqual(ev.actor.login, "jacquev6")
    self.assertEqual(ev.commit_id, None)
    self.assertEqual(ev.created_at, datetime.datetime(2012, 5, 27, 5, 40, 15))
    self.assertEqual(ev.event, "subscribed")
    self.assertEqual(ev.id, 16347479)
    self.assertEqual(ev.issue.number, 30)
    self.assertEqual(ev.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/16347479")
    self.assertEqual(ev.node_id, "MDE1OlN1YnNjcmliZWRFdmVudDE2MzQ3NDc5")
    self.assertEqual(ev.commit_url, None)
    self.assertEqual(ev.label, None)
    self.assertEqual(ev.assignee, None)
    self.assertEqual(ev.assigner, None)
    self.assertEqual(ev.review_requester, None)
    self.assertEqual(ev.requested_reviewer, None)
    self.assertEqual(ev.milestone, None)
    self.assertEqual(ev.rename, None)
    self.assertEqual(ev.dismissed_review, None)
    self.assertEqual(ev.lock_reason, None)
    # repr() must embed the event id
    self.assertEqual(repr(ev), 'IssueEvent(id=16347479)')
def testEvent_assigned_Attributes(self):
    """Verify every attribute of the recorded "assigned" issue event."""
    ev = self.event_assigned
    self.assertEqual(ev.actor.login, "jacquev6")
    self.assertEqual(ev.commit_id, None)
    self.assertEqual(ev.created_at, datetime.datetime(2012, 5, 27, 5, 40, 15))
    self.assertEqual(ev.event, "assigned")
    self.assertEqual(ev.id, 16347480)
    self.assertEqual(ev.issue.number, 30)
    self.assertEqual(ev.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/16347480")
    self.assertEqual(ev.node_id, "MDEzOkFzc2lnbmVkRXZlbnQxNjM0NzQ4MA==")
    self.assertEqual(ev.commit_url, None)
    self.assertEqual(ev.label, None)
    # assignment events carry both ends of the assignment
    self.assertEqual(ev.assignee.login, "jacquev6")
    self.assertEqual(ev.assigner.login, "ghost")
    self.assertEqual(ev.review_requester, None)
    self.assertEqual(ev.requested_reviewer, None)
    self.assertEqual(ev.milestone, None)
    self.assertEqual(ev.rename, None)
    self.assertEqual(ev.dismissed_review, None)
    self.assertEqual(ev.lock_reason, None)
    # repr() must embed the event id
    self.assertEqual(repr(ev), 'IssueEvent(id=16347480)')
def testEvent_referenced_Attributes(self):
    """Verify every attribute of the recorded "referenced" issue event."""
    ev = self.event_referenced
    self.assertEqual(ev.actor.login, "jacquev6")
    # referenced events point at the referencing commit
    self.assertEqual(ev.commit_id, "ed866fc43833802ab553e5ff8581c81bb00dd433")
    self.assertEqual(ev.created_at, datetime.datetime(2012, 5, 27, 7, 29, 25))
    self.assertEqual(ev.event, "referenced")
    self.assertEqual(ev.id, 16348656)
    self.assertEqual(ev.issue.number, 30)
    self.assertEqual(ev.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/16348656")
    self.assertEqual(ev.node_id, "MDE1OlJlZmVyZW5jZWRFdmVudDE2MzQ4NjU2")
    self.assertEqual(ev.commit_url, "https://api.github.com/repos/PyGithub/PyGithub/commits/ed866fc43833802ab553e5ff8581c81bb00dd433")
    self.assertEqual(ev.label, None)
    self.assertEqual(ev.assignee, None)
    self.assertEqual(ev.assigner, None)
    self.assertEqual(ev.review_requester, None)
    self.assertEqual(ev.requested_reviewer, None)
    self.assertEqual(ev.milestone, None)
    self.assertEqual(ev.rename, None)
    self.assertEqual(ev.dismissed_review, None)
    self.assertEqual(ev.lock_reason, None)
    # repr() must embed the event id
    self.assertEqual(repr(ev), 'IssueEvent(id=16348656)')
def testEvent_closed_Attributes(self):
    """Verify every attribute of the recorded "closed" issue event."""
    ev = self.event_closed
    self.assertEqual(ev.actor.login, "jacquev6")
    self.assertEqual(ev.commit_id, None)
    self.assertEqual(ev.created_at, datetime.datetime(2012, 5, 27, 11, 4, 25))
    self.assertEqual(ev.event, "closed")
    self.assertEqual(ev.id, 16351220)
    self.assertEqual(ev.issue.number, 30)
    self.assertEqual(ev.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/16351220")
    self.assertEqual(ev.node_id, "MDExOkNsb3NlZEV2ZW50MTYzNTEyMjA=")
    self.assertEqual(ev.commit_url, None)
    self.assertEqual(ev.label, None)
    self.assertEqual(ev.assignee, None)
    self.assertEqual(ev.assigner, None)
    self.assertEqual(ev.review_requester, None)
    self.assertEqual(ev.requested_reviewer, None)
    self.assertEqual(ev.milestone, None)
    self.assertEqual(ev.rename, None)
    self.assertEqual(ev.dismissed_review, None)
    self.assertEqual(ev.lock_reason, None)
    # repr() must embed the event id
    self.assertEqual(repr(ev), 'IssueEvent(id=16351220)')
def testEvent_labeled_Attributes(self):
    """Verify every attribute of the recorded "labeled" issue event."""
    ev = self.event_labeled
    self.assertEqual(ev.actor.login, "jacquev6")
    self.assertEqual(ev.commit_id, None)
    self.assertEqual(ev.created_at, datetime.datetime(2014, 3, 2, 18, 55, 10))
    self.assertEqual(ev.event, "labeled")
    self.assertEqual(ev.id, 98136337)
    self.assertEqual(ev.issue.number, 30)
    self.assertEqual(ev.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/98136337")
    self.assertEqual(ev.node_id, "MDEyOkxhYmVsZWRFdmVudDk4MTM2MzM3")
    self.assertEqual(ev.commit_url, None)
    # labeled events carry the label that was applied
    self.assertEqual(ev.label.name, "v1")
    self.assertEqual(ev.assignee, None)
    self.assertEqual(ev.assigner, None)
    self.assertEqual(ev.review_requester, None)
    self.assertEqual(ev.requested_reviewer, None)
    self.assertEqual(ev.milestone, None)
    self.assertEqual(ev.rename, None)
    self.assertEqual(ev.dismissed_review, None)
    self.assertEqual(ev.lock_reason, None)
    # repr() must embed the event id
    self.assertEqual(repr(ev), 'IssueEvent(id=98136337)')
def testEvent_mentioned_Attributes(self):
    """Verify every attribute of the recorded "mentioned" issue event."""
    ev = self.event_mentioned
    self.assertEqual(ev.actor.login, "jzelinskie")
    self.assertEqual(ev.commit_id, None)
    self.assertEqual(ev.created_at, datetime.datetime(2017, 3, 21, 17, 30, 14))
    self.assertEqual(ev.event, "mentioned")
    self.assertEqual(ev.id, 1009034767)
    self.assertEqual(ev.issue.number, 538)
    self.assertEqual(ev.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1009034767")
    self.assertEqual(ev.node_id, "MDE0Ok1lbnRpb25lZEV2ZW50MTAwOTAzNDc2Nw==")
    self.assertEqual(ev.commit_url, None)
    self.assertEqual(ev.label, None)
    self.assertEqual(ev.assignee, None)
    self.assertEqual(ev.assigner, None)
    self.assertEqual(ev.review_requester, None)
    self.assertEqual(ev.requested_reviewer, None)
    self.assertEqual(ev.milestone, None)
    self.assertEqual(ev.rename, None)
    self.assertEqual(ev.dismissed_review, None)
    self.assertEqual(ev.lock_reason, None)
    # repr() must embed the event id
    self.assertEqual(repr(ev), 'IssueEvent(id=1009034767)')
def testEvent_merged_Attributes(self):
    """Verify every attribute of the recorded "merged" issue event."""
    ev = self.event_merged
    self.assertEqual(ev.actor.login, "jzelinskie")
    # merged events point at the merge commit
    self.assertEqual(ev.commit_id, "2525515b094d7425f7018eb5b66171e21c5fbc10")
    self.assertEqual(ev.created_at, datetime.datetime(2017, 3, 25, 16, 52, 49))
    self.assertEqual(ev.event, "merged")
    self.assertEqual(ev.id, 1015402964)
    self.assertEqual(ev.issue.number, 538)
    self.assertEqual(ev.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1015402964")
    self.assertEqual(ev.node_id, "MDExOk1lcmdlZEV2ZW50MTAxNTQwMjk2NA==")
    self.assertEqual(ev.commit_url, "https://api.github.com/repos/PyGithub/PyGithub/commits/2525515b094d7425f7018eb5b66171e21c5fbc10")
    self.assertEqual(ev.label, None)
    self.assertEqual(ev.assignee, None)
    self.assertEqual(ev.assigner, None)
    self.assertEqual(ev.review_requester, None)
    self.assertEqual(ev.requested_reviewer, None)
    self.assertEqual(ev.milestone, None)
    self.assertEqual(ev.rename, None)
    self.assertEqual(ev.dismissed_review, None)
    self.assertEqual(ev.lock_reason, None)
    # repr() must embed the event id
    self.assertEqual(repr(ev), 'IssueEvent(id=1015402964)')
def testEvent_review_requested_Attributes(self):
    """Verify every attribute of the recorded "review_requested" issue event."""
    ev = self.event_review_requested
    self.assertEqual(ev.actor.login, "jzelinskie")
    self.assertEqual(ev.commit_id, None)
    self.assertEqual(ev.created_at, datetime.datetime(2017, 3, 22, 19, 6, 44))
    self.assertEqual(ev.event, "review_requested")
    self.assertEqual(ev.id, 1011101309)
    self.assertEqual(ev.issue.number, 538)
    self.assertEqual(ev.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1011101309")
    self.assertEqual(ev.node_id, "MDIwOlJldmlld1JlcXVlc3RlZEV2ZW50MTAxMTEwMTMwOQ==")
    self.assertEqual(ev.commit_url, None)
    self.assertEqual(ev.label, None)
    self.assertEqual(ev.assignee, None)
    self.assertEqual(ev.assigner, None)
    # review-request events carry both requester and requested reviewer
    self.assertEqual(ev.review_requester.login, "jzelinskie")
    self.assertEqual(ev.requested_reviewer.login, "jzelinskie")
    self.assertEqual(ev.milestone, None)
    self.assertEqual(ev.rename, None)
    self.assertEqual(ev.dismissed_review, None)
    self.assertEqual(ev.lock_reason, None)
    # repr() must embed the event id
    self.assertEqual(repr(ev), 'IssueEvent(id=1011101309)')
def testEvent_reopened_Attributes(self):
    """Verify every attribute of the recorded "reopened" issue event."""
    ev = self.event_reopened
    self.assertEqual(ev.actor.login, "sfdye")
    self.assertEqual(ev.commit_id, None)
    self.assertEqual(ev.created_at, datetime.datetime(2018, 8, 10, 13, 10, 9))
    self.assertEqual(ev.event, "reopened")
    self.assertEqual(ev.id, 1782463023)
    self.assertEqual(ev.issue.number, 857)
    self.assertEqual(ev.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1782463023")
    self.assertEqual(ev.node_id, "MDEzOlJlb3BlbmVkRXZlbnQxNzgyNDYzMDIz")
    self.assertEqual(ev.commit_url, None)
    self.assertEqual(ev.label, None)
    self.assertEqual(ev.assignee, None)
    self.assertEqual(ev.assigner, None)
    self.assertEqual(ev.review_requester, None)
    self.assertEqual(ev.requested_reviewer, None)
    self.assertEqual(ev.milestone, None)
    self.assertEqual(ev.rename, None)
    self.assertEqual(ev.dismissed_review, None)
    self.assertEqual(ev.lock_reason, None)
    # repr() must embed the event id
    self.assertEqual(repr(ev), 'IssueEvent(id=1782463023)')
def testEvent_unassigned_Attributes(self):
    """Verify every attribute of the recorded "unassigned" issue event.

    NOTE(review): the two duplicated actor.login assertions below sit where
    sibling tests check assignee/assigner — this looks like a copy-paste slip,
    but the recorded cassette values for assignee/assigner are not visible
    here, so it is flagged rather than changed. Verify against the fixture.
    """
    self.assertEqual(self.event_unassigned.actor.login, "sfdye")
    self.assertEqual(self.event_unassigned.commit_id, None)
    self.assertEqual(self.event_unassigned.created_at, datetime.datetime(2018, 8, 10, 13, 10, 21))
    self.assertEqual(self.event_unassigned.event, "unassigned")
    self.assertEqual(self.event_unassigned.id, 1782463379)
    self.assertEqual(self.event_unassigned.issue.number, 857)
    self.assertEqual(self.event_unassigned.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1782463379")
    self.assertEqual(self.event_unassigned.node_id, "MDE1OlVuYXNzaWduZWRFdmVudDE3ODI0NjMzNzk=")
    self.assertEqual(self.event_unassigned.commit_url, None)
    self.assertEqual(self.event_unassigned.label, None)
    # NOTE(review): duplicated actor checks — siblings test assignee/assigner here
    self.assertEqual(self.event_unassigned.actor.login, "sfdye")
    self.assertEqual(self.event_unassigned.actor.login, "sfdye")
    self.assertEqual(self.event_unassigned.review_requester, None)
    self.assertEqual(self.event_unassigned.requested_reviewer, None)
    self.assertEqual(self.event_unassigned.milestone, None)
    self.assertEqual(self.event_unassigned.rename, None)
    self.assertEqual(self.event_unassigned.dismissed_review, None)
    self.assertEqual(self.event_unassigned.lock_reason, None)
    # test __repr__() based on this attributes
    self.assertEqual(self.event_unassigned.__repr__(), 'IssueEvent(id=1782463379)')
def testEvent_unlabeled_Attributes(self):
    """Verify every attribute of the recorded "unlabeled" issue event."""
    ev = self.event_unlabeled
    self.assertEqual(ev.actor.login, "sfdye")
    self.assertEqual(ev.commit_id, None)
    self.assertEqual(ev.created_at, datetime.datetime(2018, 8, 10, 13, 10, 38))
    self.assertEqual(ev.event, "unlabeled")
    self.assertEqual(ev.id, 1782463917)
    self.assertEqual(ev.issue.number, 857)
    self.assertEqual(ev.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1782463917")
    self.assertEqual(ev.node_id, "MDE0OlVubGFiZWxlZEV2ZW50MTc4MjQ2MzkxNw==")
    self.assertEqual(ev.commit_url, None)
    # unlabeled events carry the label that was removed
    self.assertEqual(ev.label.name, "improvement")
    self.assertEqual(ev.assignee, None)
    self.assertEqual(ev.assigner, None)
    self.assertEqual(ev.review_requester, None)
    self.assertEqual(ev.requested_reviewer, None)
    self.assertEqual(ev.milestone, None)
    self.assertEqual(ev.rename, None)
    self.assertEqual(ev.dismissed_review, None)
    self.assertEqual(ev.lock_reason, None)
    # repr() must embed the event id
    self.assertEqual(repr(ev), 'IssueEvent(id=1782463917)')
def testEvent_renamed_Attributes(self):
    """Verify every attribute of the recorded "renamed" issue event."""
    ev = self.event_renamed
    self.assertEqual(ev.actor.login, "sfdye")
    self.assertEqual(ev.commit_id, None)
    self.assertEqual(ev.created_at, datetime.datetime(2018, 8, 10, 13, 15, 18))
    self.assertEqual(ev.event, "renamed")
    self.assertEqual(ev.id, 1782472556)
    self.assertEqual(ev.issue.number, 857)
    self.assertEqual(ev.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1782472556")
    self.assertEqual(ev.node_id, "MDE3OlJlbmFtZWRUaXRsZUV2ZW50MTc4MjQ3MjU1Ng==")
    self.assertEqual(ev.commit_url, None)
    self.assertEqual(ev.label, None)
    self.assertEqual(ev.assignee, None)
    self.assertEqual(ev.assigner, None)
    self.assertEqual(ev.review_requester, None)
    self.assertEqual(ev.requested_reviewer, None)
    self.assertEqual(ev.milestone, None)
    # renamed events carry the old and new titles
    self.assertEqual(ev.rename, {'to': 'Adding new attributes to IssueEvent', 'from': 'Adding new attributes to IssueEvent Object (DO NOT MERGE - SEE NOTES)'})
    self.assertEqual(ev.dismissed_review, None)
    self.assertEqual(ev.lock_reason, None)
    # repr() must embed the event id
    self.assertEqual(repr(ev), 'IssueEvent(id=1782472556)')
def testEvent_base_ref_changed_Attributes(self):
    """Verify every attribute of the recorded "base_ref_changed" issue event.

    Fix: the rename assertion previously checked self.event_head_ref_deleted
    (copy-paste from the neighbouring test) instead of this event.
    """
    ev = self.event_base_ref_changed
    self.assertEqual(ev.actor.login, "allevin")
    self.assertEqual(ev.commit_id, None)
    self.assertEqual(ev.created_at, datetime.datetime(2018, 8, 10, 16, 38, 22))
    self.assertEqual(ev.event, "base_ref_changed")
    self.assertEqual(ev.id, 1782915693)
    self.assertEqual(ev.issue.number, 857)
    self.assertEqual(ev.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1782915693")
    self.assertEqual(ev.node_id, "MDE5OkJhc2VSZWZDaGFuZ2VkRXZlbnQxNzgyOTE1Njkz")
    self.assertEqual(ev.commit_url, None)
    self.assertEqual(ev.label, None)
    self.assertEqual(ev.assignee, None)
    self.assertEqual(ev.assigner, None)
    self.assertEqual(ev.review_requester, None)
    self.assertEqual(ev.requested_reviewer, None)
    self.assertEqual(ev.milestone, None)
    # was: self.event_head_ref_deleted.rename — assert on THIS event instead
    self.assertEqual(ev.rename, None)
    self.assertEqual(ev.dismissed_review, None)
    self.assertEqual(ev.lock_reason, None)
    # repr() must embed the event id
    self.assertEqual(repr(ev), 'IssueEvent(id=1782915693)')
def testEvent_head_ref_deleted_Attributes(self):
    """Verify every attribute of the recorded "head_ref_deleted" issue event."""
    ev = self.event_head_ref_deleted
    self.assertEqual(ev.actor.login, "allevin")
    self.assertEqual(ev.commit_id, None)
    self.assertEqual(ev.created_at, datetime.datetime(2018, 8, 10, 16, 39, 20))
    self.assertEqual(ev.event, "head_ref_deleted")
    self.assertEqual(ev.id, 1782917185)
    self.assertEqual(ev.issue.number, 857)
    self.assertEqual(ev.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1782917185")
    self.assertEqual(ev.node_id, "MDE5OkhlYWRSZWZEZWxldGVkRXZlbnQxNzgyOTE3MTg1")
    self.assertEqual(ev.commit_url, None)
    self.assertEqual(ev.label, None)
    self.assertEqual(ev.assignee, None)
    self.assertEqual(ev.assigner, None)
    self.assertEqual(ev.review_requester, None)
    self.assertEqual(ev.requested_reviewer, None)
    self.assertEqual(ev.milestone, None)
    self.assertEqual(ev.rename, None)
    self.assertEqual(ev.dismissed_review, None)
    self.assertEqual(ev.lock_reason, None)
    # repr() must embed the event id
    self.assertEqual(repr(ev), 'IssueEvent(id=1782917185)')
def testEvent_head_ref_restored_Attributes(self):
    """Verify every attribute of the recorded "head_ref_restored" issue event.

    Fix: the rename and lock_reason assertions previously checked
    self.event_head_ref_deleted (copy-paste from the neighbouring test)
    instead of this event.
    """
    ev = self.event_head_ref_restored
    self.assertEqual(ev.actor.login, "allevin")
    self.assertEqual(ev.commit_id, None)
    self.assertEqual(ev.created_at, datetime.datetime(2018, 8, 10, 16, 39, 23))
    self.assertEqual(ev.event, "head_ref_restored")
    self.assertEqual(ev.id, 1782917299)
    self.assertEqual(ev.issue.number, 857)
    self.assertEqual(ev.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1782917299")
    self.assertEqual(ev.node_id, "MDIwOkhlYWRSZWZSZXN0b3JlZEV2ZW50MTc4MjkxNzI5OQ==")
    self.assertEqual(ev.commit_url, None)
    self.assertEqual(ev.label, None)
    self.assertEqual(ev.assignee, None)
    self.assertEqual(ev.assigner, None)
    self.assertEqual(ev.review_requester, None)
    self.assertEqual(ev.requested_reviewer, None)
    self.assertEqual(ev.milestone, None)
    # was: self.event_head_ref_deleted.rename — assert on THIS event instead
    self.assertEqual(ev.rename, None)
    self.assertEqual(ev.dismissed_review, None)
    # was: self.event_head_ref_deleted.lock_reason — assert on THIS event instead
    self.assertEqual(ev.lock_reason, None)
    # repr() must embed the event id
    self.assertEqual(repr(ev), 'IssueEvent(id=1782917299)')
def testEvent_milestoned_Attributes(self):
    """Verify every attribute of the recorded "milestoned" issue event."""
    ev = self.event_milestoned
    self.assertEqual(ev.actor.login, "sfdye")
    self.assertEqual(ev.commit_id, None)
    self.assertEqual(ev.created_at, datetime.datetime(2018, 8, 11, 0, 46, 19))
    self.assertEqual(ev.event, "milestoned")
    self.assertEqual(ev.id, 1783596418)
    self.assertEqual(ev.issue.number, 857)
    self.assertEqual(ev.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1783596418")
    self.assertEqual(ev.node_id, "MDE1Ok1pbGVzdG9uZWRFdmVudDE3ODM1OTY0MTg=")
    self.assertEqual(ev.commit_url, None)
    self.assertEqual(ev.label, None)
    self.assertEqual(ev.assignee, None)
    self.assertEqual(ev.assigner, None)
    self.assertEqual(ev.review_requester, None)
    self.assertEqual(ev.requested_reviewer, None)
    # milestoned events carry the milestone that was applied
    self.assertEqual(ev.milestone.title, "Version 1.25.0")
    self.assertEqual(ev.rename, None)
    self.assertEqual(ev.dismissed_review, None)
    self.assertEqual(ev.lock_reason, None)
    # repr() must embed the event id
    self.assertEqual(repr(ev), 'IssueEvent(id=1783596418)')
def testEvent_demilestoned_Attributes(self):
    """Verify every attribute of the recorded "demilestoned" issue event."""
    ev = self.event_demilestoned
    self.assertEqual(ev.actor.login, "sfdye")
    self.assertEqual(ev.commit_id, None)
    self.assertEqual(ev.created_at, datetime.datetime(2018, 8, 11, 0, 46, 22))
    self.assertEqual(ev.event, "demilestoned")
    self.assertEqual(ev.id, 1783596452)
    self.assertEqual(ev.issue.number, 857)
    self.assertEqual(ev.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1783596452")
    self.assertEqual(ev.node_id, "MDE3OkRlbWlsZXN0b25lZEV2ZW50MTc4MzU5NjQ1Mg==")
    self.assertEqual(ev.commit_url, None)
    self.assertEqual(ev.label, None)
    self.assertEqual(ev.assignee, None)
    self.assertEqual(ev.assigner, None)
    self.assertEqual(ev.review_requester, None)
    self.assertEqual(ev.requested_reviewer, None)
    # demilestoned events carry the milestone that was removed
    self.assertEqual(ev.milestone.title, "Version 1.25.0")
    self.assertEqual(ev.rename, None)
    self.assertEqual(ev.dismissed_review, None)
    self.assertEqual(ev.lock_reason, None)
    # repr() must embed the event id
    self.assertEqual(repr(ev), 'IssueEvent(id=1783596452)')
def testEvent_locked_Attributes(self):
    """Verify every attribute of the recorded "locked" issue event."""
    ev = self.event_locked
    self.assertEqual(ev.actor.login, "PyGithub")
    self.assertEqual(ev.commit_id, None)
    self.assertEqual(ev.created_at, datetime.datetime(2018, 8, 11, 0, 46, 56))
    self.assertEqual(ev.event, "locked")
    self.assertEqual(ev.id, 1783596743)
    self.assertEqual(ev.issue.number, 857)
    self.assertEqual(ev.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1783596743")
    self.assertEqual(ev.node_id, "MDExOkxvY2tlZEV2ZW50MTc4MzU5Njc0Mw==")
    self.assertEqual(ev.commit_url, None)
    self.assertEqual(ev.label, None)
    self.assertEqual(ev.assignee, None)
    self.assertEqual(ev.assigner, None)
    self.assertEqual(ev.review_requester, None)
    self.assertEqual(ev.requested_reviewer, None)
    self.assertEqual(ev.milestone, None)
    self.assertEqual(ev.rename, None)
    self.assertEqual(ev.dismissed_review, None)
    # locked events carry the lock reason
    self.assertEqual(ev.lock_reason, "too heated")
    # repr() must embed the event id
    self.assertEqual(repr(ev), 'IssueEvent(id=1783596743)')
def testEvent_unlocked_Attributes(self):
    """Verify every attribute of the recorded "unlocked" issue event."""
    ev = self.event_unlocked
    self.assertEqual(ev.actor.login, "PyGithub")
    self.assertEqual(ev.commit_id, None)
    self.assertEqual(ev.created_at, datetime.datetime(2018, 8, 11, 0, 47, 7))
    self.assertEqual(ev.event, "unlocked")
    self.assertEqual(ev.id, 1783596818)
    self.assertEqual(ev.issue.number, 857)
    self.assertEqual(ev.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1783596818")
    self.assertEqual(ev.node_id, "MDEzOlVubG9ja2VkRXZlbnQxNzgzNTk2ODE4")
    self.assertEqual(ev.commit_url, None)
    self.assertEqual(ev.label, None)
    self.assertEqual(ev.assignee, None)
    self.assertEqual(ev.assigner, None)
    self.assertEqual(ev.review_requester, None)
    self.assertEqual(ev.requested_reviewer, None)
    self.assertEqual(ev.milestone, None)
    self.assertEqual(ev.rename, None)
    self.assertEqual(ev.dismissed_review, None)
    self.assertEqual(ev.lock_reason, None)
    # repr() must embed the event id
    self.assertEqual(repr(ev), 'IssueEvent(id=1783596818)')
def testEvent_review_dismissed_Attributes(self):
    """Verify every attribute of a ``review_dismissed`` issue event fixture."""
    self.assertEqual(self.event_review_dismissed.actor.login, "sfdye")
    # assertIsNone gives clearer failure messages than assertEqual(x, None)
    self.assertIsNone(self.event_review_dismissed.commit_id)
    self.assertEqual(self.event_review_dismissed.created_at, datetime.datetime(2018, 8, 11, 1, 7, 10))
    self.assertEqual(self.event_review_dismissed.event, "review_dismissed")
    self.assertEqual(self.event_review_dismissed.id, 1783605084)
    self.assertEqual(self.event_review_dismissed.issue.number, 857)
    self.assertEqual(self.event_review_dismissed.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1783605084")
    self.assertEqual(self.event_review_dismissed.node_id, "MDIwOlJldmlld0Rpc21pc3NlZEV2ZW50MTc4MzYwNTA4NA==")
    self.assertIsNone(self.event_review_dismissed.commit_url)
    self.assertIsNone(self.event_review_dismissed.label)
    self.assertIsNone(self.event_review_dismissed.assignee)
    self.assertIsNone(self.event_review_dismissed.assigner)
    self.assertIsNone(self.event_review_dismissed.review_requester)
    self.assertIsNone(self.event_review_dismissed.requested_reviewer)
    self.assertIsNone(self.event_review_dismissed.milestone)
    self.assertIsNone(self.event_review_dismissed.rename)
    # dismissed_review is the one event-specific payload for this event type
    self.assertEqual(self.event_review_dismissed.dismissed_review, {'dismissal_message': 'dismiss', 'state': 'changes_requested', 'review_id': 145431295})
    self.assertIsNone(self.event_review_dismissed.lock_reason)
    # test __repr__() based on these attributes; repr() is the idiomatic call
    self.assertEqual(repr(self.event_review_dismissed), 'IssueEvent(id=1783605084)')
def testEvent_review_request_removed_Attributes(self):
    """Verify every attribute of a ``review_request_removed`` issue event fixture."""
    self.assertEqual(self.event_review_request_removed.actor.login, "sfdye")
    # assertIsNone gives clearer failure messages than assertEqual(x, None)
    self.assertIsNone(self.event_review_request_removed.commit_id)
    self.assertEqual(self.event_review_request_removed.created_at, datetime.datetime(2018, 8, 11, 12, 32, 59))
    self.assertEqual(self.event_review_request_removed.event, "review_request_removed")
    self.assertEqual(self.event_review_request_removed.id, 1783779835)
    self.assertEqual(self.event_review_request_removed.issue.number, 857)
    self.assertEqual(self.event_review_request_removed.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1783779835")
    self.assertEqual(self.event_review_request_removed.node_id, "MDI1OlJldmlld1JlcXVlc3RSZW1vdmVkRXZlbnQxNzgzNzc5ODM1")
    self.assertIsNone(self.event_review_request_removed.commit_url)
    self.assertIsNone(self.event_review_request_removed.label)
    self.assertIsNone(self.event_review_request_removed.assignee)
    self.assertIsNone(self.event_review_request_removed.assigner)
    # review_requester / requested_reviewer are populated for this event type
    self.assertEqual(self.event_review_request_removed.review_requester.login, "sfdye")
    self.assertEqual(self.event_review_request_removed.requested_reviewer.login, "jasonwhite")
    self.assertIsNone(self.event_review_request_removed.milestone)
    self.assertIsNone(self.event_review_request_removed.rename)
    self.assertIsNone(self.event_review_request_removed.dismissed_review)
    self.assertIsNone(self.event_review_request_removed.lock_reason)
    # test __repr__() based on these attributes; repr() is the idiomatic call
    self.assertEqual(repr(self.event_review_request_removed), 'IssueEvent(id=1783779835)')
def testEvent_marked_as_duplicate_Attributes(self):
    """Verify every attribute of a ``marked_as_duplicate`` issue event fixture."""
    self.assertEqual(self.event_marked_as_duplicate.actor.login, "sfdye")
    # assertIsNone gives clearer failure messages than assertEqual(x, None)
    self.assertIsNone(self.event_marked_as_duplicate.commit_id)
    self.assertEqual(self.event_marked_as_duplicate.created_at, datetime.datetime(2018, 8, 11, 12, 32, 35))
    self.assertEqual(self.event_marked_as_duplicate.event, "marked_as_duplicate")
    self.assertEqual(self.event_marked_as_duplicate.id, 1783779725)
    self.assertEqual(self.event_marked_as_duplicate.issue.number, 857)
    self.assertEqual(self.event_marked_as_duplicate.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1783779725")
    self.assertEqual(self.event_marked_as_duplicate.node_id, "MDIyOk1hcmtlZEFzRHVwbGljYXRlRXZlbnQxNzgzNzc5NzI1")
    self.assertIsNone(self.event_marked_as_duplicate.commit_url)
    self.assertIsNone(self.event_marked_as_duplicate.label)
    self.assertIsNone(self.event_marked_as_duplicate.assignee)
    self.assertIsNone(self.event_marked_as_duplicate.assigner)
    self.assertIsNone(self.event_marked_as_duplicate.review_requester)
    self.assertIsNone(self.event_marked_as_duplicate.requested_reviewer)
    self.assertIsNone(self.event_marked_as_duplicate.milestone)
    self.assertIsNone(self.event_marked_as_duplicate.rename)
    self.assertIsNone(self.event_marked_as_duplicate.dismissed_review)
    self.assertIsNone(self.event_marked_as_duplicate.lock_reason)
    # test __repr__() based on these attributes; repr() is the idiomatic call
    self.assertEqual(repr(self.event_marked_as_duplicate), 'IssueEvent(id=1783779725)')
def testEvent_unmarked_as_duplicate_Attributes(self):
    """Verify every attribute of an ``unmarked_as_duplicate`` issue event fixture."""
    self.assertEqual(self.event_unmarked_as_duplicate.actor.login, "sfdye")
    # assertIsNone gives clearer failure messages than assertEqual(x, None)
    self.assertIsNone(self.event_unmarked_as_duplicate.commit_id)
    self.assertEqual(self.event_unmarked_as_duplicate.created_at, datetime.datetime(2018, 8, 15, 2, 57, 46))
    self.assertEqual(self.event_unmarked_as_duplicate.event, "unmarked_as_duplicate")
    self.assertEqual(self.event_unmarked_as_duplicate.id, 1789228962)
    self.assertEqual(self.event_unmarked_as_duplicate.issue.number, 857)
    self.assertEqual(self.event_unmarked_as_duplicate.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1789228962")
    self.assertEqual(self.event_unmarked_as_duplicate.node_id, "MDI0OlVubWFya2VkQXNEdXBsaWNhdGVFdmVudDE3ODkyMjg5NjI=")
    self.assertIsNone(self.event_unmarked_as_duplicate.commit_url)
    self.assertIsNone(self.event_unmarked_as_duplicate.label)
    self.assertIsNone(self.event_unmarked_as_duplicate.assignee)
    self.assertIsNone(self.event_unmarked_as_duplicate.assigner)
    self.assertIsNone(self.event_unmarked_as_duplicate.review_requester)
    self.assertIsNone(self.event_unmarked_as_duplicate.requested_reviewer)
    self.assertIsNone(self.event_unmarked_as_duplicate.milestone)
    self.assertIsNone(self.event_unmarked_as_duplicate.rename)
    self.assertIsNone(self.event_unmarked_as_duplicate.dismissed_review)
    self.assertIsNone(self.event_unmarked_as_duplicate.lock_reason)
    # test __repr__() based on these attributes; repr() is the idiomatic call
    self.assertEqual(repr(self.event_unmarked_as_duplicate), 'IssueEvent(id=1789228962)')
def testEvent_added_to_project_Attributes(self):
    """Verify every attribute of an ``added_to_project`` issue event fixture."""
    self.assertEqual(self.event_added_to_project.actor.login, "sfdye")
    # assertIsNone gives clearer failure messages than assertEqual(x, None)
    self.assertIsNone(self.event_added_to_project.commit_id)
    self.assertEqual(self.event_added_to_project.created_at, datetime.datetime(2018, 8, 16, 8, 13, 24))
    self.assertEqual(self.event_added_to_project.event, "added_to_project")
    self.assertEqual(self.event_added_to_project.id, 1791766828)
    self.assertEqual(self.event_added_to_project.issue.number, 857)
    self.assertEqual(self.event_added_to_project.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1791766828")
    self.assertEqual(self.event_added_to_project.node_id, "MDE5OkFkZGVkVG9Qcm9qZWN0RXZlbnQxNzkxNzY2ODI4")
    self.assertIsNone(self.event_added_to_project.commit_url)
    self.assertIsNone(self.event_added_to_project.label)
    self.assertIsNone(self.event_added_to_project.assignee)
    self.assertIsNone(self.event_added_to_project.assigner)
    self.assertIsNone(self.event_added_to_project.review_requester)
    self.assertIsNone(self.event_added_to_project.requested_reviewer)
    self.assertIsNone(self.event_added_to_project.milestone)
    self.assertIsNone(self.event_added_to_project.rename)
    self.assertIsNone(self.event_added_to_project.dismissed_review)
    self.assertIsNone(self.event_added_to_project.lock_reason)
    # test __repr__() based on these attributes; repr() is the idiomatic call
    self.assertEqual(repr(self.event_added_to_project), 'IssueEvent(id=1791766828)')
def testEvent_moved_columns_in_project_Attributes(self):
    """Verify every attribute of a ``moved_columns_in_project`` issue event fixture."""
    self.assertEqual(self.event_moved_columns_in_project.actor.login, "sfdye")
    # assertIsNone gives clearer failure messages than assertEqual(x, None)
    self.assertIsNone(self.event_moved_columns_in_project.commit_id)
    self.assertEqual(self.event_moved_columns_in_project.created_at, datetime.datetime(2018, 8, 16, 8, 13, 55))
    self.assertEqual(self.event_moved_columns_in_project.event, "moved_columns_in_project")
    self.assertEqual(self.event_moved_columns_in_project.id, 1791767766)
    self.assertEqual(self.event_moved_columns_in_project.issue.number, 857)
    self.assertEqual(self.event_moved_columns_in_project.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1791767766")
    self.assertEqual(self.event_moved_columns_in_project.node_id, "MDI2Ok1vdmVkQ29sdW1uc0luUHJvamVjdEV2ZW50MTc5MTc2Nzc2Ng==")
    self.assertIsNone(self.event_moved_columns_in_project.commit_url)
    self.assertIsNone(self.event_moved_columns_in_project.label)
    self.assertIsNone(self.event_moved_columns_in_project.assignee)
    self.assertIsNone(self.event_moved_columns_in_project.assigner)
    self.assertIsNone(self.event_moved_columns_in_project.review_requester)
    self.assertIsNone(self.event_moved_columns_in_project.requested_reviewer)
    self.assertIsNone(self.event_moved_columns_in_project.milestone)
    self.assertIsNone(self.event_moved_columns_in_project.rename)
    self.assertIsNone(self.event_moved_columns_in_project.dismissed_review)
    self.assertIsNone(self.event_moved_columns_in_project.lock_reason)
    # test __repr__() based on these attributes; repr() is the idiomatic call
    self.assertEqual(repr(self.event_moved_columns_in_project), 'IssueEvent(id=1791767766)')
def testEvent_removed_from_project_Attributes(self):
    """Verify every attribute of a ``removed_from_project`` issue event fixture."""
    self.assertEqual(self.event_removed_from_project.actor.login, "sfdye")
    # assertIsNone gives clearer failure messages than assertEqual(x, None)
    self.assertIsNone(self.event_removed_from_project.commit_id)
    self.assertEqual(self.event_removed_from_project.created_at, datetime.datetime(2018, 8, 16, 8, 14, 8))
    self.assertEqual(self.event_removed_from_project.event, "removed_from_project")
    self.assertEqual(self.event_removed_from_project.id, 1791768212)
    self.assertEqual(self.event_removed_from_project.issue.number, 857)
    self.assertEqual(self.event_removed_from_project.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1791768212")
    self.assertEqual(self.event_removed_from_project.node_id, "MDIzOlJlbW92ZWRGcm9tUHJvamVjdEV2ZW50MTc5MTc2ODIxMg==")
    self.assertIsNone(self.event_removed_from_project.commit_url)
    self.assertIsNone(self.event_removed_from_project.label)
    self.assertIsNone(self.event_removed_from_project.assignee)
    self.assertIsNone(self.event_removed_from_project.assigner)
    self.assertIsNone(self.event_removed_from_project.review_requester)
    self.assertIsNone(self.event_removed_from_project.requested_reviewer)
    self.assertIsNone(self.event_removed_from_project.milestone)
    self.assertIsNone(self.event_removed_from_project.rename)
    self.assertIsNone(self.event_removed_from_project.dismissed_review)
    self.assertIsNone(self.event_removed_from_project.lock_reason)
    # test __repr__() based on these attributes; repr() is the idiomatic call
    self.assertEqual(repr(self.event_removed_from_project), 'IssueEvent(id=1791768212)')
def testEvent_converted_note_to_issue_Attributes(self):
    """Verify every attribute of a ``converted_note_to_issue`` issue event fixture."""
    self.assertEqual(self.event_converted_note_to_issue.actor.login, "sfdye")
    # assertIsNone gives clearer failure messages than assertEqual(x, None)
    self.assertIsNone(self.event_converted_note_to_issue.commit_id)
    self.assertEqual(self.event_converted_note_to_issue.created_at, datetime.datetime(2018, 8, 16, 8, 14, 34))
    self.assertEqual(self.event_converted_note_to_issue.event, "converted_note_to_issue")
    self.assertEqual(self.event_converted_note_to_issue.id, 1791769149)
    # note this event belongs to issue 866, unlike the other fixtures (857)
    self.assertEqual(self.event_converted_note_to_issue.issue.number, 866)
    self.assertEqual(self.event_converted_note_to_issue.url, "https://api.github.com/repos/PyGithub/PyGithub/issues/events/1791769149")
    self.assertEqual(self.event_converted_note_to_issue.node_id, "MDI1OkNvbnZlcnRlZE5vdGVUb0lzc3VlRXZlbnQxNzkxNzY5MTQ5")
    self.assertIsNone(self.event_converted_note_to_issue.commit_url)
    self.assertIsNone(self.event_converted_note_to_issue.label)
    self.assertIsNone(self.event_converted_note_to_issue.assignee)
    self.assertIsNone(self.event_converted_note_to_issue.assigner)
    self.assertIsNone(self.event_converted_note_to_issue.review_requester)
    self.assertIsNone(self.event_converted_note_to_issue.requested_reviewer)
    self.assertIsNone(self.event_converted_note_to_issue.milestone)
    self.assertIsNone(self.event_converted_note_to_issue.rename)
    self.assertIsNone(self.event_converted_note_to_issue.dismissed_review)
    self.assertIsNone(self.event_converted_note_to_issue.lock_reason)
    # test __repr__() based on these attributes; repr() is the idiomatic call
    self.assertEqual(repr(self.event_converted_note_to_issue), 'IssueEvent(id=1791769149)')
| 70.846269
| 179
| 0.743611
| 5,665
| 47,467
| 5.946161
| 0.055428
| 0.144278
| 0.289357
| 0.365504
| 0.845569
| 0.790738
| 0.591005
| 0.384949
| 0.224195
| 0.129256
| 0
| 0.044321
| 0.155814
| 47,467
| 669
| 180
| 70.952167
| 0.796312
| 0.068405
| 0
| 0.013962
| 0
| 0
| 0.109424
| 0.045165
| 0
| 0
| 0
| 0
| 0.895288
| 1
| 0.048866
| false
| 0
| 0.00349
| 0
| 0.054101
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4f883a94fe71908a523d580ae90c904d0a8bf941
| 254
|
py
|
Python
|
contact/admin.py
|
D-GopalKrishna/RobotixWeb2021
|
3f99d41b2c4c99a3d1a214db1489f3e2fb1bfbb2
|
[
"Apache-2.0"
] | null | null | null |
contact/admin.py
|
D-GopalKrishna/RobotixWeb2021
|
3f99d41b2c4c99a3d1a214db1489f3e2fb1bfbb2
|
[
"Apache-2.0"
] | 7
|
2020-02-12T02:54:35.000Z
|
2022-03-12T00:06:26.000Z
|
contact/admin.py
|
D-GopalKrishna/RobotixWeb2021
|
3f99d41b2c4c99a3d1a214db1489f3e2fb1bfbb2
|
[
"Apache-2.0"
] | 6
|
2020-02-10T16:37:38.000Z
|
2021-01-28T13:39:46.000Z
|
from django.contrib import admin
from import_export.admin import ImportExportModelAdmin
from .models import Contact
# Register your models here.
# admin.site.register(Contact)
@admin.register(Contact)
class ContactAdmin(ImportExportModelAdmin):
    """Admin page for Contact records.

    Extends ImportExportModelAdmin, which adds import/export actions to the
    change-list page (provided by the django-import-export package).
    """
    pass
| 28.222222
| 54
| 0.826772
| 30
| 254
| 6.966667
| 0.533333
| 0.143541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106299
| 254
| 8
| 55
| 31.75
| 0.920705
| 0.216535
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.166667
| 0.666667
| 0
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
96cd60a65c76685a0f6009435001bdfd6f889b19
| 95
|
py
|
Python
|
peact/__init__.py
|
glotzerlab/peact
|
ead549336d46127f1b05021a8cc2e6f3d4d298c2
|
[
"BSD-3-Clause"
] | 2
|
2019-02-09T12:29:33.000Z
|
2019-03-02T14:27:16.000Z
|
peact/__init__.py
|
glotzerlab/peact
|
ead549336d46127f1b05021a8cc2e6f3d4d298c2
|
[
"BSD-3-Clause"
] | null | null | null |
peact/__init__.py
|
glotzerlab/peact
|
ead549336d46127f1b05021a8cc2e6f3d4d298c2
|
[
"BSD-3-Clause"
] | null | null | null |
from .version import __version__
from ._peact import CallNode, CallGraph
from . import modules
| 23.75
| 39
| 0.821053
| 12
| 95
| 6.083333
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136842
| 95
| 3
| 40
| 31.666667
| 0.890244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8c1aa920427321dd1d022665ad70a4bba9a2d190
| 22
|
py
|
Python
|
.vscode/run_nose.py
|
Lawreros/C-PAC
|
ce26ba9a38cbd401cd405150eeed23b805007724
|
[
"BSD-3-Clause"
] | 125
|
2015-03-04T09:14:46.000Z
|
2022-03-29T07:46:12.000Z
|
.vscode/run_nose.py
|
Lawreros/C-PAC
|
ce26ba9a38cbd401cd405150eeed23b805007724
|
[
"BSD-3-Clause"
] | 1,018
|
2015-01-04T16:01:29.000Z
|
2022-03-31T19:23:09.000Z
|
.vscode/run_nose.py
|
Lawreros/C-PAC
|
ce26ba9a38cbd401cd405150eeed23b805007724
|
[
"BSD-3-Clause"
] | 117
|
2015-01-10T08:05:52.000Z
|
2022-01-18T05:16:51.000Z
|
# Invoke the nose test collector/runner programmatically; nose.run() discovers
# and runs tests starting from the current working directory.
# NOTE(review): lives under .vscode/ per repo metadata, so this is presumably
# an editor test-task entry point — confirm before relocating.
import nose
nose.run()
| 11
| 11
| 0.772727
| 4
| 22
| 4.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 22
| 2
| 12
| 11
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8c520d26c400fbce50f75a529c454c2b8fa82679
| 53
|
py
|
Python
|
HBdataMonitor/__init__.py
|
menno94/beheertoolHB
|
c4a93b3d3efcbf26390dc088663e8439e39fd47e
|
[
"MIT"
] | null | null | null |
HBdataMonitor/__init__.py
|
menno94/beheertoolHB
|
c4a93b3d3efcbf26390dc088663e8439e39fd47e
|
[
"MIT"
] | null | null | null |
HBdataMonitor/__init__.py
|
menno94/beheertoolHB
|
c4a93b3d3efcbf26390dc088663e8439e39fd47e
|
[
"MIT"
] | null | null | null |
from HBdataMonitor.HBdataMonitor import HBdataMonitor
| 53
| 53
| 0.924528
| 5
| 53
| 9.8
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056604
| 53
| 1
| 53
| 53
| 0.98
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4fb7fd4495e181e449428d2017e1e9d3a7ca6a59
| 144
|
py
|
Python
|
data/passprompt.py
|
Suleman-Elahi/aurin
|
19bec33f5a41ae008256e91cb3323a78cd7fbd0a
|
[
"MIT"
] | 52
|
2022-01-30T20:22:33.000Z
|
2022-03-27T00:47:02.000Z
|
data/passprompt.py
|
Suleman-Elahi/aurin
|
19bec33f5a41ae008256e91cb3323a78cd7fbd0a
|
[
"MIT"
] | 2
|
2022-01-31T19:13:42.000Z
|
2022-02-01T02:18:03.000Z
|
data/passprompt.py
|
Suleman-Elahi/aurin
|
19bec33f5a41ae008256e91cb3323a78cd7fbd0a
|
[
"MIT"
] | 3
|
2022-01-30T20:43:40.000Z
|
2022-02-07T18:15:04.000Z
|
import tkinter as tk
import tkinter.simpledialog

# askstring() needs an existing Tk root: without one, recent tkinter raises
# "Too early to create variable: no default root window", and older versions
# create a stray blank root window behind the dialog. Create the root
# explicitly and hide it so only the password dialog is visible.
_root = tk.Tk()
_root.withdraw()

# Prompt for the password with masked input ("*" echo).
answer = tk.simpledialog.askstring("Enter sudo password", 'Password:', show="*")
# NOTE: answer is None if the user cancels the dialog; the caller reads
# this script's stdout, so that case prints the literal "None".
print(answer)
| 28.8
| 80
| 0.770833
| 18
| 144
| 6.166667
| 0.666667
| 0.234234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097222
| 144
| 5
| 81
| 28.8
| 0.853846
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.25
| 0.5
| 0
| 0.5
| 0.25
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
4fc87a487029a8a79de141007d313e93e9af036d
| 294
|
py
|
Python
|
federatedscope/mf/trainer/__init__.py
|
alibaba/FederatedScope
|
fcf6d237624769ea094cfd68803901622f14fc23
|
[
"Apache-2.0"
] | 9
|
2022-03-24T07:59:37.000Z
|
2022-03-31T06:47:52.000Z
|
federatedscope/mf/trainer/__init__.py
|
alibaba/FederatedScope
|
fcf6d237624769ea094cfd68803901622f14fc23
|
[
"Apache-2.0"
] | 1
|
2022-03-28T13:52:17.000Z
|
2022-03-28T13:52:17.000Z
|
federatedscope/mf/trainer/__init__.py
|
alibaba/FederatedScope
|
fcf6d237624769ea094cfd68803901622f14fc23
|
[
"Apache-2.0"
] | null | null | null |
from federatedscope.mf.trainer.trainer import MFTrainer
from federatedscope.mf.trainer.trainer_sgdmf import wrap_MFTrainer, init_sgdmf_ctx, embedding_clip, hook_on_batch_backward
# Public API of the mf.trainer package; keep this list in sync with the
# names imported at the top of the module.
__all__ = [
    'MFTrainer', 'wrap_MFTrainer', 'init_sgdmf_ctx', 'embedding_clip',
    'hook_on_batch_backward'
]
| 36.75
| 122
| 0.812925
| 38
| 294
| 5.789474
| 0.447368
| 0.163636
| 0.181818
| 0.245455
| 0.827273
| 0.518182
| 0.518182
| 0.518182
| 0.518182
| 0.518182
| 0
| 0
| 0.098639
| 294
| 7
| 123
| 42
| 0.830189
| 0
| 0
| 0
| 0
| 0
| 0.248299
| 0.07483
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4fc98e0e75a0e83e96695482819d406509669830
| 64
|
py
|
Python
|
trainers/__init__.py
|
vWing7/VisRecSys
|
9420180f6124cc5367b33e77c6c94c33a9d97867
|
[
"MIT"
] | 21
|
2021-04-11T22:08:54.000Z
|
2021-12-15T15:06:35.000Z
|
trainers/__init__.py
|
ialab-puc/VisualRecSys-Tutorial-ICDM2021
|
7672237fcb451d06c5f27ad110990f8ee4708c4b
|
[
"MIT"
] | 1
|
2021-08-23T22:22:33.000Z
|
2021-08-23T23:23:24.000Z
|
trainers/__init__.py
|
ialab-puc/VisualRecSys-Tutorial-ICDM2021
|
7672237fcb451d06c5f27ad110990f8ee4708c4b
|
[
"MIT"
] | 5
|
2021-04-13T16:50:35.000Z
|
2021-10-01T17:39:10.000Z
|
from .trainer import Trainer
from .img_trainer import ImgTrainer
| 32
| 35
| 0.859375
| 9
| 64
| 6
| 0.555556
| 0.481481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109375
| 64
| 2
| 35
| 32
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8b36b33e31af745048c431c13bbdb8cb8f7eac51
| 206
|
py
|
Python
|
core/signals/handlers.py
|
zeestack/storefront3
|
3ba87aae61a4877f4e51bc80442fe5954bbda76d
|
[
"MIT"
] | null | null | null |
core/signals/handlers.py
|
zeestack/storefront3
|
3ba87aae61a4877f4e51bc80442fe5954bbda76d
|
[
"MIT"
] | null | null | null |
core/signals/handlers.py
|
zeestack/storefront3
|
3ba87aae61a4877f4e51bc80442fe5954bbda76d
|
[
"MIT"
] | null | null | null |
from django.dispatch import receiver
from store.signals import order_created
@receiver(order_created)
def on_order_created(sender, **kwargs):
    """Print a confirmation line when the store app fires ``order_created``.

    The signal payload is expected to carry the new order under the
    ``"order"`` keyword (raises KeyError otherwise).
    """
    order = kwargs["order"]
    print(f'{order} has been successfully created')
| 25.75
| 61
| 0.786408
| 28
| 206
| 5.642857
| 0.642857
| 0.227848
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11165
| 206
| 7
| 62
| 29.428571
| 0.863388
| 0
| 0
| 0
| 0
| 0
| 0.228155
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.6
| 0.2
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8cc7d896c3717dbcba4bc37b908f9dd6b5072eb9
| 452
|
py
|
Python
|
watchdog/back-end/v0.3.0/watchdog/app/util/user.py
|
Havana3351/Low-cost-remote-monitor
|
9f86a62b8515c0f9fddda31f25548680f0ad8e2f
|
[
"MIT"
] | 18
|
2021-12-03T13:18:07.000Z
|
2022-03-30T20:20:17.000Z
|
watchdog/back-end/v0.3.0/watchdog/app/util/user.py
|
Fairywyt/Low-cost-remote-monitor
|
263b98d969251d2dbef5fb5e4d42a58075e744fa
|
[
"MIT"
] | null | null | null |
watchdog/back-end/v0.3.0/watchdog/app/util/user.py
|
Fairywyt/Low-cost-remote-monitor
|
263b98d969251d2dbef5fb5e4d42a58075e744fa
|
[
"MIT"
] | 4
|
2022-03-22T09:58:00.000Z
|
2022-03-28T08:57:17.000Z
|
from werkzeug.security import generate_password_hash, check_password_hash
class User:
    """Minimal in-memory user record using werkzeug password hashing."""

    # Class-level defaults; shadowed per-instance once assigned.
    # NOTE(review): the default 'admin' in password_hash is a plaintext
    # placeholder, not a real hash — check_password will always fail against
    # it. Confirm whether a hashed default was intended.
    username = 'admin'
    password_hash = 'admin'
    phonenum = ''
    dorm = ''
    room = ''
    campus = ''

    def set_password(self, password):
        """Hash *password* and store the hash on this instance.

        Fixed: the original also print()ed the freshly computed hash, leaking
        password-derived material to stdout/logs; that debug print is removed.
        """
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        """Return True if *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)
| 26.588235
| 73
| 0.672566
| 50
| 452
| 5.8
| 0.42
| 0.331034
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.234513
| 452
| 16
| 74
| 28.25
| 0.83815
| 0
| 0
| 0
| 1
| 0
| 0.022124
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0.538462
| 0.076923
| 0.076923
| 0.846154
| 0.076923
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
50a6ea17c79324ca6c775b14ee0903f2806f654b
| 14,739
|
py
|
Python
|
Basic Machine Vision/opencv_threaded_processing.py
|
DocVaughan/CRAWLAB-Code-Snippets
|
90c946bef0fbe37401f822d58ce5a6b3c5349616
|
[
"BSD-3-Clause"
] | 12
|
2015-03-03T18:32:03.000Z
|
2021-03-13T18:50:37.000Z
|
Basic Machine Vision/opencv_threaded_processing.py
|
DocVaughan/CRAWLAB-Code-Snippets
|
90c946bef0fbe37401f822d58ce5a6b3c5349616
|
[
"BSD-3-Clause"
] | null | null | null |
Basic Machine Vision/opencv_threaded_processing.py
|
DocVaughan/CRAWLAB-Code-Snippets
|
90c946bef0fbe37401f822d58ce5a6b3c5349616
|
[
"BSD-3-Clause"
] | 7
|
2017-01-20T20:31:54.000Z
|
2021-12-28T16:52:48.000Z
|
#! /usr/bin/env python
###############################################################################
# opencv_threaded_processing.py
#
# Demonstrating using threading to speed up an opencv pipeline.
# Rates will still be limited by hardware. Here, an fps improvements beyond
# the hardware limit of the camera will be somewhat misleading. The script is
# simply processing the same frame multiple times.
#
# Uses opencv 3 and the imutils library
#
# OpenCV was installed from:
# - https://anaconda.org/anaconda/opencv
# imutils installed using instructions at:
# - https://github.com/jrosebr1/imutils
#
# Adapted from code at:
# https://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/
#
#
# NOTE: Any plotting is set up for output, not viewing on screen.
# So, it will likely be ugly on screen. The saved PDFs should look
# better.
#
# Created: 03/23/18
# - Joshua Vaughan
# - joshua.vaughan@louisiana.edu
# - http://www.ucs.louisiana.edu/~jev9637
#
# Modified:
# *
#
# TODO:
# *
###############################################################################
# import the necessary packages
from __future__ import print_function
from imutils.video import WebcamVideoStream
from imutils.video import FPS
import argparse
import imutils
import cv2
import numpy as np
import time
import matplotlib.pyplot as plt
# Construct the argument parser and parse the command-line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-n", "--num-frames", type=int, default=150,
                help="# of frames to loop over for FPS test")
ap.add_argument("-d", "--display", type=int, default=-1,
                help="Whether or not frames should be displayed")
args = vars(ap.parse_args())

# Video codec to use in writing
# avc1 is h264, which we should use, if at all possible
FOURCC = cv2.VideoWriter_fourcc('a','v','c','1')

# Set the camera number to use (zero indexed)
CAMERA_SOURCE = 1

# Lower and upper boundaries of the desired color in HSV space
# (tennis-ball green)
colorLower = (29, 86, 6)
colorUpper = (64, 255, 255)

# Arrays storing one row per processed frame: (elapsed time, centroid x, centroid y)
data = np.zeros((args["num_frames"], 3)) # TODO: Make this more robust to indefinite capture

# We'll process THREADED_MULT x the number of frames we processed unthreaded
THREADED_MULT = 10
data_threaded = np.zeros((args["num_frames"] * THREADED_MULT, 3)) # TODO: Make this more robust to indefinite capture
try:
    # Grab a pointer to the (unthreaded) video stream and start the FPS counter
    print("[INFO] sampling frames from webcam...")
    stream = cv2.VideoCapture(CAMERA_SOURCE)
    fps = FPS().start()

    # Default resolutions of the frame are obtained; they are system dependent.
    # Convert the resolutions from float to integer for the VideoWriter.
    frame_width = int(stream.get(3))
    frame_height = int(stream.get(4))
    out = cv2.VideoWriter('output.mp4', FOURCC, 30.0, (frame_width, frame_height))

    # Counter and timestamps used to time-stamp the logged centroid data
    count = 0
    last_time = time.time()
    start_time = last_time
    total_elapsed_time = 0.0

    # loop over some frames
    while fps._numFrames < args["num_frames"]:
        # grab the frame from the stream
        (grabbed, frame) = stream.read()

        # Fix: skip iterations where the camera failed to deliver a frame;
        # without this check cv2.cvtColor raises on a None frame.
        if not grabbed or frame is None:
            continue

        # convert the frame to the HSV color space
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # Construct a mask for the desired color, then perform a series of
        # dilations and erosions to remove any small blobs left in the mask
        mask = cv2.inRange(hsv, colorLower, colorUpper)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)

        # Uncomment below to show the masked image
        # NOTE: This will *dramatically* slow down the processing
        # cv2.imshow("Frame", mask)

        # Find contours in the mask; the [-2] index keeps this working across
        # OpenCV versions whose findContours return signatures differ.
        cnts = cv2.findContours(mask.copy(),
                                cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        center = None

        # only proceed if at least one contour was found
        if len(cnts) > 0:
            # Find the largest contour in the mask, then use it to compute
            # the minimum enclosing circle and centroid
            c = max(cnts, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)

            # Fix: guard against a zero area moment, which would raise
            # ZeroDivisionError for a degenerate contour
            if M["m00"] > 0:
                center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

                # only proceed if the radius meets a minimum size
                if radius > 10:
                    # draw the circle and centroid on the frame, then log the
                    # (time, x, y) sample for this frame
                    cv2.circle(frame, (int(x), int(y)), int(radius),
                               (0, 255, 255), 2)
                    cv2.circle(frame, center, 5, (0, 0, 255), -1)
                    data[count] = np.hstack((total_elapsed_time,
                                             np.asarray(center))).reshape(1, 3)

        # Write the (possibly annotated) frame to the output video file
        out.write(frame)

        # check to see if the frame should be displayed to our screen
        if args["display"] > 0:
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

        # Track elapsed time for time-stamping the logged data.
        # (The unused per-frame fps_estimate of the original was removed;
        # the FPS counter below already reports throughput.)
        count = count + 1
        current_time = time.time()
        total_elapsed_time = current_time - start_time
        last_time = current_time  # save current time for next loop

        # update the FPS counter
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))  # fix: "elasped" typo
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

except KeyboardInterrupt:
    print("\n\nClosing...")
    # Uncomment below to re-raise the exception
    # raise

finally:
    # Create a mask matching any rows that were never filled (all zeros or
    # NaN), then trim them so the data array only holds real samples.
    mask = np.all(np.isnan(data), axis=1) | np.all(data == 0, axis=1)
    data = data[~mask]

    # do a bit of cleanup
    stream.release()
    out.release()
    cv2.destroyAllWindows()
try:
    # Create a *threaded* video stream (frames are grabbed on a background
    # thread), allow the camera sensor to warm up, and start the FPS counter
    print("[INFO] sampling THREADED frames from webcam...")
    vs = WebcamVideoStream(src=CAMERA_SOURCE).start()
    fps = FPS().start()
    out_threaded = cv2.VideoWriter('output_threaded.mp4', FOURCC, 90.0,
                                   (frame_width, frame_height))

    # Counter and timestamps used to time-stamp the logged centroid data
    count = 0
    last_time = time.time()
    start_time = last_time
    total_elapsed_time = 0.0

    # loop over some frames...this time using the threaded stream
    while fps._numFrames < args["num_frames"] * THREADED_MULT:
        # grab the most recent frame from the threaded video stream
        frame = vs.read()

        # Fix: the background grabber may not have produced a frame yet
        # (especially right after start()); skip until one is available
        # instead of crashing in cv2.cvtColor.
        if frame is None:
            continue

        # convert the frame to the HSV color space
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # Construct a mask for the desired color, then perform a series of
        # dilations and erosions to remove any small blobs left in the mask
        mask = cv2.inRange(hsv, colorLower, colorUpper)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)

        # Uncomment below to show the masked image
        # NOTE: This will *dramatically* slow down the processing
        # cv2.imshow("Frame", mask)

        # Find contours in the mask; the [-2] index keeps this working across
        # OpenCV versions whose findContours return signatures differ.
        cnts = cv2.findContours(mask.copy(),
                                cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
        center = None

        # only proceed if at least one contour was found
        if len(cnts) > 0:
            # Find the largest contour in the mask, then use it to compute
            # the minimum enclosing circle and centroid
            c = max(cnts, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)

            # Fix: guard against a zero area moment, which would raise
            # ZeroDivisionError for a degenerate contour
            if M["m00"] > 0:
                center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

                # only proceed if the radius meets a minimum size
                if radius > 10:
                    # draw the circle and centroid on the frame, then log the
                    # (time, x, y) sample for this frame
                    cv2.circle(frame, (int(x), int(y)), int(radius),
                               (0, 255, 255), 2)
                    cv2.circle(frame, center, 5, (0, 0, 255), -1)
                    data_threaded[count] = np.hstack((total_elapsed_time,
                                                      np.asarray(center))).reshape(1, 3)

        # Write the (possibly annotated) frame to the output video file
        out_threaded.write(frame)

        # check to see if the frame should be displayed to our screen
        if args["display"] > 0:
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF

        # Track elapsed time for time-stamping the logged data.
        # (The unused per-frame fps_estimate of the original was removed.)
        count = count + 1
        current_time = time.time()
        total_elapsed_time = current_time - start_time
        last_time = current_time  # save current time for next loop

        # update the FPS counter
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))  # fix: "elasped" typo
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

except KeyboardInterrupt:
    print("\n\nClosing...")
    # Uncomment below to re-raise the exception
    # raise

finally:
    # Create a mask matching any rows that were never filled (all zeros or
    # NaN), then trim them so the data array only holds real samples.
    mask = np.all(np.isnan(data_threaded), axis=1) | np.all(data_threaded == 0, axis=1)
    data_threaded = data_threaded[~mask]

    # do a bit of cleanup
    out_threaded.release()
    cv2.destroyAllWindows()
    vs.stop()
def _make_comparison_plot(x_label, y_label, baseline_xy, threaded_xy,
                          equal_axes=False):
    """Create one formatted figure comparing baseline vs. threaded tracking data.

    Parameters
    ----------
    x_label, y_label : str
        Axis labels for the figure.
    baseline_xy, threaded_xy : tuple of array-like
        (x, y) data for the unthreaded and threaded runs, respectively.
    equal_axes : bool
        If True, force equal axis scaling (used for the x-y path plot).

    Returns
    -------
    matplotlib.figure.Figure
        The created figure (also left as the current pyplot figure).

    Note: formatting is set up for saved-PDF output, not on-screen viewing.
    """
    # Set the plot size - 3x2 aspect ratio is best
    fig = plt.figure(figsize=(6, 4))
    ax = plt.gca()
    plt.subplots_adjust(bottom=0.17, left=0.17, top=0.96, right=0.96)

    # Change the axis units font and hide the top/right spines
    plt.setp(ax.get_ymajorticklabels(), fontsize=18)
    plt.setp(ax.get_xmajorticklabels(), fontsize=18)
    ax.spines['right'].set_color('none')
    ax.spines['top'].set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')

    # Turn on the plot grid and set appropriate linestyle and color
    ax.grid(True, linestyle=':', color='0.75')
    ax.set_axisbelow(True)

    # Define the X and Y axis labels
    plt.xlabel(x_label, fontsize=22, weight='bold', labelpad=5)
    plt.ylabel(y_label, fontsize=22, weight='bold', labelpad=10)

    plt.plot(baseline_xy[0], baseline_xy[1], linewidth=2, linestyle='--',
             label=r'Baseline')
    plt.plot(threaded_xy[0], threaded_xy[1], linewidth=2, linestyle='-',
             label=r'Threaded')

    if equal_axes:
        plt.axis('equal')

    # Create the legend, then fix the fontsize
    leg = plt.legend(loc='upper right', ncol=1, fancybox=True)
    plt.setp(leg.get_texts(), fontsize=18)

    # Adjust the page layout filling the page using tight_layout
    plt.tight_layout(pad=0.5)

    # save the figure as a high-res pdf in the current folder
    # plt.savefig('plot_filename.pdf')
    return fig


# Horizontal blob location over time
_make_comparison_plot('Time (s)', 'Horiz. Location (pixels)',
                      (data[:, 0], data[:, 1]),
                      (data_threaded[:, 0], data_threaded[:, 1]))

# Vertical blob location over time
_make_comparison_plot('Time (s)', 'Vertical Location (pixels)',
                      (data[:, 0], data[:, 2]),
                      (data_threaded[:, 0], data_threaded[:, 2]))

# Blob path in the image plane (equal axis scaling so the path isn't distorted)
_make_comparison_plot('Horizontal Location (pixels)', 'Vertical Location (pixels)',
                      (data[:, 1], data[:, 2]),
                      (data_threaded[:, 1], data_threaded[:, 2]),
                      equal_axes=True)

# show the figures
plt.show()
| 33.346154
| 118
| 0.646245
| 2,130
| 14,739
| 4.40892
| 0.207042
| 0.015334
| 0.007667
| 0.007667
| 0.757853
| 0.74401
| 0.726973
| 0.725482
| 0.725482
| 0.716537
| 0
| 0.029093
| 0.230409
| 14,739
| 442
| 119
| 33.346154
| 0.798819
| 0.392971
| 0
| 0.697297
| 0
| 0
| 0.089457
| 0
| 0
| 0
| 0.000926
| 0.002262
| 0
| 1
| 0
| false
| 0
| 0.048649
| 0
| 0.048649
| 0.048649
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
50b0a34eb06e4ed5e7b1275112847fb3c1541ee2
| 59
|
py
|
Python
|
fatima/agents/__init__.py
|
AmrMKayid/fatima
|
9ee5365889bca8bc05a84eb4130b9357a8177366
|
[
"MIT"
] | null | null | null |
fatima/agents/__init__.py
|
AmrMKayid/fatima
|
9ee5365889bca8bc05a84eb4130b9357a8177366
|
[
"MIT"
] | null | null | null |
fatima/agents/__init__.py
|
AmrMKayid/fatima
|
9ee5365889bca8bc05a84eb4130b9357a8177366
|
[
"MIT"
] | null | null | null |
from .base import BaseTrainer
from .trainer import Trainer
| 19.666667
| 29
| 0.830508
| 8
| 59
| 6.125
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135593
| 59
| 2
| 30
| 29.5
| 0.960784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
50b8eef5e1d0786b7dc0c8abb415cad6b469b46d
| 217
|
py
|
Python
|
wrapanapi/utils/__init__.py
|
ManageIQ/mgmtsystem
|
1a0ee5b99ef3770e119c6264f4e452640c4275bf
|
[
"MIT"
] | 13
|
2016-09-13T07:30:02.000Z
|
2019-05-22T09:14:27.000Z
|
wrapanapi/utils/__init__.py
|
ManageIQ/mgmtsystem
|
1a0ee5b99ef3770e119c6264f4e452640c4275bf
|
[
"MIT"
] | 228
|
2016-06-15T10:23:38.000Z
|
2020-01-13T13:49:31.000Z
|
wrapanapi/utils/__init__.py
|
ManageIQ/mgmtsystem
|
1a0ee5b99ef3770e119c6264f4e452640c4275bf
|
[
"MIT"
] | 61
|
2016-07-21T15:59:52.000Z
|
2019-09-23T11:03:41.000Z
|
from .logger_mixin import LoggerMixin
from .json_utils import (
json_load_byteified, json_loads_byteified, eval_strings
)
__all__ = ['LoggerMixin', 'json_load_byteified', 'json_loads_byteified', 'eval_strings']
| 27.125
| 88
| 0.797235
| 27
| 217
| 5.814815
| 0.481481
| 0.101911
| 0.216561
| 0.267516
| 0.585987
| 0.585987
| 0.585987
| 0.585987
| 0
| 0
| 0
| 0
| 0.110599
| 217
| 7
| 89
| 31
| 0.813472
| 0
| 0
| 0
| 0
| 0
| 0.287037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
50c91744e003474b84a93510a8f64622aa38cc46
| 84
|
py
|
Python
|
python3/koans/jims.py
|
digiaonline/python_koans
|
e6264b70a32c6af5d55806cacae37cace363a0b4
|
[
"MIT"
] | 1
|
2020-09-23T06:33:59.000Z
|
2020-09-23T06:33:59.000Z
|
python3/koans/jims.py
|
digiaonline/python_koans
|
e6264b70a32c6af5d55806cacae37cace363a0b4
|
[
"MIT"
] | null | null | null |
python3/koans/jims.py
|
digiaonline/python_koans
|
e6264b70a32c6af5d55806cacae37cace363a0b4
|
[
"MIT"
] | 1
|
2020-09-22T11:33:22.000Z
|
2020-09-22T11:33:22.000Z
|
#!/usr/bin/env python
class Dog:
    """A koan fixture: a dog that can identify itself."""

    def identify(self):
        """Return this dog's identifying string."""
        return "jims dog"
| 14
| 25
| 0.607143
| 12
| 84
| 4.25
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.261905
| 84
| 5
| 26
| 16.8
| 0.822581
| 0.238095
| 0
| 0
| 0
| 0
| 0.126984
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
50daeceebf51065df5ecd0960bceed75c5acb5cf
| 45
|
py
|
Python
|
fauxcaml/lir/__init__.py
|
eignnx/fauxcaml
|
082625f5803d6f676c0d63b6ce45b03a6069d720
|
[
"MIT"
] | 1
|
2019-05-11T00:49:48.000Z
|
2019-05-11T00:49:48.000Z
|
fauxcaml/lir/__init__.py
|
eignnx/fauxcaml
|
082625f5803d6f676c0d63b6ce45b03a6069d720
|
[
"MIT"
] | 5
|
2019-04-01T21:36:17.000Z
|
2019-05-13T22:04:58.000Z
|
fauxcaml/lir/__init__.py
|
eignnx/fauxcaml
|
082625f5803d6f676c0d63b6ce45b03a6069d720
|
[
"MIT"
] | null | null | null |
"""
Low-level Intermediate Representation
"""
| 15
| 37
| 0.755556
| 4
| 45
| 8.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 45
| 3
| 38
| 15
| 0.829268
| 0.822222
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
50f4f7bafea6af5c200ee26ab6a8400670c75aef
| 48
|
py
|
Python
|
adviewer/tests/conftest.py
|
ZLLentz/adviewer
|
e73161ccf62384fb45a30996f439d6b56580c193
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
adviewer/tests/conftest.py
|
ZLLentz/adviewer
|
e73161ccf62384fb45a30996f439d6b56580c193
|
[
"BSD-3-Clause-LBNL"
] | 7
|
2019-03-12T16:27:44.000Z
|
2021-04-15T16:17:14.000Z
|
adviewer/tests/conftest.py
|
ZLLentz/adviewer
|
e73161ccf62384fb45a30996f439d6b56580c193
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2019-03-12T16:16:48.000Z
|
2021-04-15T18:42:00.000Z
|
import ophyd

# Set the ophyd simulated-device logger to the INFO level for the test session.
ophyd.sim.logger.setLevel('INFO')
| 12
| 33
| 0.770833
| 7
| 48
| 5.285714
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 48
| 3
| 34
| 16
| 0.840909
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
50fd2998d0043911d65659d6add6518559e00669
| 68
|
py
|
Python
|
python/templates/docs/src/index/index_01_complex.py
|
dutradda/devtools
|
c07f0a7f937777615b30835393e5811d470dc223
|
[
"MIT"
] | 1
|
2020-06-30T23:39:00.000Z
|
2020-06-30T23:39:00.000Z
|
python/templates/docs/src/index/index_01_complex.py
|
dutradda/devtools
|
c07f0a7f937777615b30835393e5811d470dc223
|
[
"MIT"
] | null | null | null |
python/templates/docs/src/index/index_01_complex.py
|
dutradda/devtools
|
c07f0a7f937777615b30835393e5811d470dc223
|
[
"MIT"
] | 1
|
2019-09-29T23:52:20.000Z
|
2019-09-29T23:52:20.000Z
|
import json
# Serialize a small greeting mapping to JSON and print it.
message = json.dumps({'hello': 'Hello', 'world': 'World!'})
print(message)
| 17
| 54
| 0.705882
| 10
| 68
| 4.8
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073529
| 68
| 3
| 55
| 22.666667
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0.161765
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
0fbfce29179a0a488e009c5196ae5c9e883ad658
| 230
|
py
|
Python
|
kornia/contrib/__init__.py
|
shaunster0/kornia
|
71acf455ee36f2050b7be5ea993b6db773f502eb
|
[
"ECL-2.0",
"Apache-2.0"
] | 51
|
2019-10-11T18:47:30.000Z
|
2021-05-03T06:42:37.000Z
|
kornia/contrib/__init__.py
|
shaunster0/kornia
|
71acf455ee36f2050b7be5ea993b6db773f502eb
|
[
"ECL-2.0",
"Apache-2.0"
] | 9
|
2022-01-25T00:28:05.000Z
|
2022-03-20T09:14:39.000Z
|
kornia/contrib/__init__.py
|
shaunster0/kornia
|
71acf455ee36f2050b7be5ea993b6db773f502eb
|
[
"ECL-2.0",
"Apache-2.0"
] | 7
|
2019-10-11T19:59:05.000Z
|
2020-07-10T02:28:52.000Z
|
from .extract_patches import extract_tensor_patches, ExtractTensorPatches
from .max_blur_pool import max_blur_pool2d, MaxBlurPool2d
__all__ = ["extract_tensor_patches", "max_blur_pool2d", "ExtractTensorPatches", "MaxBlurPool2d"]
| 46
| 96
| 0.847826
| 26
| 230
| 6.923077
| 0.461538
| 0.116667
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018779
| 0.073913
| 230
| 4
| 97
| 57.5
| 0.826291
| 0
| 0
| 0
| 0
| 0
| 0.304348
| 0.095652
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0fe68c30ceec83a42cd6f1c58270702dded4247f
| 46
|
py
|
Python
|
codewof/programming/content/en/end-of-file/solution.py
|
uccser-admin/programming-practice-prototype
|
3af4c7d85308ac5bb35bb13be3ec18cac4eb8308
|
[
"MIT"
] | 3
|
2019-08-29T04:11:22.000Z
|
2021-06-22T16:05:51.000Z
|
codewof/programming/content/en/end-of-file/solution.py
|
uccser-admin/programming-practice-prototype
|
3af4c7d85308ac5bb35bb13be3ec18cac4eb8308
|
[
"MIT"
] | 265
|
2019-05-30T03:51:46.000Z
|
2022-03-31T01:05:12.000Z
|
codewof/programming/content/en/end-of-file/solution.py
|
samuelsandri/codewof
|
c9b8b378c06b15a0c42ae863b8f46581de04fdfc
|
[
"MIT"
] | 7
|
2019-06-29T12:13:37.000Z
|
2021-09-06T06:49:14.000Z
|
def end_of_file(file):
    """Append the end-of-file marker to *file* (a list of lines), in place."""
    marker = "EOF"
    file.append(marker)
| 15.333333
| 22
| 0.673913
| 8
| 46
| 3.625
| 0.75
| 0.551724
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152174
| 46
| 2
| 23
| 23
| 0.74359
| 0
| 0
| 0
| 0
| 0
| 0.065217
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0ff3affe5bf05ade8e724c2bbff70720f7aa9cef
| 134
|
py
|
Python
|
sitetree/runtests.py
|
PetrDlouhy/django-sitetree
|
f9525b10771c4b461c260925b8c5fb59bc9f5449
|
[
"BSD-3-Clause"
] | null | null | null |
sitetree/runtests.py
|
PetrDlouhy/django-sitetree
|
f9525b10771c4b461c260925b8c5fb59bc9f5449
|
[
"BSD-3-Clause"
] | null | null | null |
sitetree/runtests.py
|
PetrDlouhy/django-sitetree
|
f9525b10771c4b461c260925b8c5fb59bc9f5449
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python
import sys
if __name__ == '__main__':
    # Run the package test suite via pytest and exit with pytest's status code.
    from pytest import main as pytest_main
    sys.exit(pytest_main())
| 16.75
| 42
| 0.69403
| 20
| 134
| 4.15
| 0.65
| 0.240964
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19403
| 134
| 7
| 43
| 19.142857
| 0.768519
| 0.156716
| 0
| 0
| 0
| 0
| 0.071429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ba2943f455b1d2f0045e27dd1d6ca48aa97aef64
| 147
|
py
|
Python
|
tcpsite/user/admin.py
|
TeamCrazyPerformance/tcp-web-back
|
71e0c9f0bd511bb49b4fe5928f7f7aa6912ba255
|
[
"BSD-3-Clause"
] | null | null | null |
tcpsite/user/admin.py
|
TeamCrazyPerformance/tcp-web-back
|
71e0c9f0bd511bb49b4fe5928f7f7aa6912ba255
|
[
"BSD-3-Clause"
] | 25
|
2020-03-08T11:27:21.000Z
|
2021-06-04T22:39:56.000Z
|
tcpsite/user/admin.py
|
TeamCrazyPerformance/tcp-web-back
|
71e0c9f0bd511bb49b4fe5928f7f7aa6912ba255
|
[
"BSD-3-Clause"
] | 1
|
2020-03-08T10:57:25.000Z
|
2020-03-08T10:57:25.000Z
|
from django.contrib import admin
from .models import Grade, User
# Register your models here.
# Expose the Grade and User models in the Django admin site with the
# default ModelAdmin options.
admin.site.register(Grade)
admin.site.register(User)
| 24.5
| 32
| 0.802721
| 22
| 147
| 5.363636
| 0.545455
| 0.152542
| 0.288136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108844
| 147
| 6
| 33
| 24.5
| 0.900763
| 0.176871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
e84806019e8921625942685785d7941c5951576b
| 183
|
py
|
Python
|
django_dashboard/documents.py
|
keepexploring/smartbiogas
|
ca663435b05666113e3c0cb55e6f087c61497208
|
[
"MIT"
] | null | null | null |
django_dashboard/documents.py
|
keepexploring/smartbiogas
|
ca663435b05666113e3c0cb55e6f087c61497208
|
[
"MIT"
] | 10
|
2017-11-24T12:15:40.000Z
|
2022-02-10T06:41:32.000Z
|
django_dashboard/documents.py
|
keepexploring/smartbiogas
|
ca663435b05666113e3c0cb55e6f087c61497208
|
[
"MIT"
] | null | null | null |
from django_elasticsearch_dsl import DocType, Index
from .models import Company, UserDetail, TechnicianDetail, BiogasPlantContact, TechnicianDetail, BiogasPlant, JobHistory, Dashboard
| 91.5
| 131
| 0.868852
| 18
| 183
| 8.722222
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081967
| 183
| 2
| 131
| 91.5
| 0.934524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e856b8a169849b7d81031925b3fdd9541524b0bb
| 88
|
py
|
Python
|
perceptron/__init__.py
|
justinnhli/justinnhli-oxy
|
27d7890375b632ad99654d401302c125027dcfa3
|
[
"MIT"
] | null | null | null |
perceptron/__init__.py
|
justinnhli/justinnhli-oxy
|
27d7890375b632ad99654d401302c125027dcfa3
|
[
"MIT"
] | 1
|
2017-04-13T18:36:08.000Z
|
2017-04-24T02:39:40.000Z
|
perceptron/__init__.py
|
justinnhli/justinnhli-oxy
|
27d7890375b632ad99654d401302c125027dcfa3
|
[
"MIT"
] | 1
|
2017-04-12T00:30:29.000Z
|
2017-04-12T00:30:29.000Z
|
"""Graphical explanation of perceptron training."""
from .app import app as perceptron
| 22
| 51
| 0.772727
| 11
| 88
| 6.181818
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 88
| 3
| 52
| 29.333333
| 0.894737
| 0.511364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e87d1aef950bf49d511a7f8cdc9cf6bb21112ec5
| 82
|
py
|
Python
|
leetcode709.py
|
AmitHasanShuvo/Programming
|
f47ecc626e518a0bf5f9f749afd15ce67bbe737b
|
[
"MIT"
] | 8
|
2019-05-26T19:24:13.000Z
|
2021-03-24T17:36:14.000Z
|
leetcode709.py
|
AmitHasanShuvo/Programming
|
f47ecc626e518a0bf5f9f749afd15ce67bbe737b
|
[
"MIT"
] | null | null | null |
leetcode709.py
|
AmitHasanShuvo/Programming
|
f47ecc626e518a0bf5f9f749afd15ce67bbe737b
|
[
"MIT"
] | 1
|
2020-04-19T04:59:54.000Z
|
2020-04-19T04:59:54.000Z
|
class Solution:
    """LeetCode 709: convert a string to lower case."""

    def toLowerCase(self, string):
        """Return *string* with every cased character lower-cased."""
        return str.lower(string)
| 16.4
| 34
| 0.658537
| 9
| 82
| 6
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.243902
| 82
| 4
| 35
| 20.5
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
2cf6752316095f65c2b7745bac75bb92fe377365
| 154
|
py
|
Python
|
DataQualityTester/lib/helpers.py
|
pwyf/data-quality-tester
|
d7674849c64d4d41ff4e4b6b12631994c7ce0a92
|
[
"MIT"
] | null | null | null |
DataQualityTester/lib/helpers.py
|
pwyf/data-quality-tester
|
d7674849c64d4d41ff4e4b6b12631994c7ce0a92
|
[
"MIT"
] | 53
|
2017-04-07T09:41:38.000Z
|
2022-02-11T14:26:46.000Z
|
DataQualityTester/lib/helpers.py
|
pwyf/data-quality-tester
|
d7674849c64d4d41ff4e4b6b12631994c7ce0a92
|
[
"MIT"
] | 3
|
2017-07-19T13:43:14.000Z
|
2019-10-29T15:25:49.000Z
|
import re
def pprint(explanation):
    """Render *explanation* as simple HTML.

    Newlines become ``<br>`` tags and backtick-quoted spans become
    ``<code>`` elements.
    """
    html = explanation.replace('\n', '<br>')
    return re.sub(r'`([^`]*)`', r'<code>\1</code>', html)
| 22
| 64
| 0.603896
| 19
| 154
| 4.894737
| 0.684211
| 0.473118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007576
| 0.142857
| 154
| 6
| 65
| 25.666667
| 0.69697
| 0
| 0
| 0
| 0
| 0
| 0.194805
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0.25
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
fa16318202182f293e545baeb12d909efb292493
| 437
|
py
|
Python
|
lino_xl/lib/pages/fixtures/std.py
|
khchine5/xl
|
b1634937a9ce87af1e948eb712b934b11f221d9d
|
[
"BSD-2-Clause"
] | 1
|
2018-01-12T14:09:48.000Z
|
2018-01-12T14:09:48.000Z
|
lino_xl/lib/pages/fixtures/std.py
|
khchine5/xl
|
b1634937a9ce87af1e948eb712b934b11f221d9d
|
[
"BSD-2-Clause"
] | 1
|
2019-09-10T05:03:47.000Z
|
2019-09-10T05:03:47.000Z
|
lino_xl/lib/pages/fixtures/std.py
|
khchine5/xl
|
b1634937a9ce87af1e948eb712b934b11f221d9d
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: UTF-8 -*-
# Copyright 2012-2016 Luc Saffre
#
# License: BSD (see file COPYING for details)
"""
Default data for `pages` is the content defined in
:mod:`lino_xl.lib.pages.fixtures.web`.
"""
#~ from lino_xl.lib.pages.fixtures.web import objects
def objects():
    """Yield the demo data for `pages` from the ``intro`` fixture module."""
    # Imported lazily so the fixture module loads only when the fixture runs.
    from lino_xl.lib.pages.fixtures.intro import objects
    yield objects()
#~ from lino_xl.lib.pages.fixtures.man import objects
#~ yield objects()
| 20.809524
| 57
| 0.695652
| 64
| 437
| 4.6875
| 0.5625
| 0.08
| 0.12
| 0.186667
| 0.4
| 0.4
| 0.22
| 0
| 0
| 0
| 0
| 0.024862
| 0.171625
| 437
| 20
| 58
| 21.85
| 0.803867
| 0.707094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
fa3fcdc406ec6bda922d09c25b66824866a23753
| 100
|
py
|
Python
|
xcube_hub/core/services.py
|
bcdev/xcube-hub
|
8eab0fccd340aa487560a41ae7c59dedb0cb08a8
|
[
"MIT"
] | 3
|
2021-03-08T09:47:23.000Z
|
2021-09-13T04:53:42.000Z
|
xcube_hub/core/services.py
|
bcdev/xcube-hub
|
8eab0fccd340aa487560a41ae7c59dedb0cb08a8
|
[
"MIT"
] | 9
|
2021-06-23T15:33:04.000Z
|
2022-03-30T08:30:17.000Z
|
xcube_hub/core/services.py
|
bcdev/xcube-hub
|
8eab0fccd340aa487560a41ae7c59dedb0cb08a8
|
[
"MIT"
] | null | null | null |
# Identifiers of the xcube services known to the hub.
_SERVICES = ["xcube_gen", "xcube_serve", "xcube_geodb"]


def get_services():
    """Return the list of known xcube service identifiers."""
    return _SERVICES
| 14.285714
| 55
| 0.7
| 12
| 100
| 5.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 100
| 6
| 56
| 16.666667
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0.313131
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
fa4f1f0d1d436bda0a462ab9acd04cbceac286c8
| 1,721
|
py
|
Python
|
google/cloud/bigquery/datatransfer_v1/proto/resourcestate_pb2.py
|
cmm08/bq-dts-partner-clients-python
|
29f19c27ec95769ccbc21f48553fed451a1b2ae5
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/bigquery/datatransfer_v1/proto/resourcestate_pb2.py
|
cmm08/bq-dts-partner-clients-python
|
29f19c27ec95769ccbc21f48553fed451a1b2ae5
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/bigquery/datatransfer_v1/proto/resourcestate_pb2.py
|
cmm08/bq-dts-partner-clients-python
|
29f19c27ec95769ccbc21f48553fed451a1b2ae5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/bigquery/datatransfer_v1/proto/resourcestate.proto
# NOTE(review): machine-generated module — change the .proto and regenerate
# rather than editing this file by hand.
import sys
# On Python 2 the serialized descriptor strings pass through unchanged;
# on Python 3 they are latin-1 encoded to bytes.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
# File descriptor carrying package/options metadata and the timestamp.proto
# dependency; no message types are declared in this generated module.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='google/cloud/bigquery/datatransfer_v1/proto/resourcestate.proto',
  package='google.cloud.bigquery.datatransfer.v1',
  syntax='proto3',
  serialized_options=_b('\n)com.google.cloud.bigquery.datatransfer.v1B\022ResourceStateProtoP\001ZQgoogle.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1;datatransfer\252\002%Google.Cloud.BigQuery.DataTransfer.V1\312\002%Google\\Cloud\\BigQuery\\DataTransfer\\V1'),
  serialized_pb=_b('\n?google/cloud/bigquery/datatransfer_v1/proto/resourcestate.proto\x12%google.cloud.bigquery.datatransfer.v1\x1a\x1fgoogle/protobuf/timestamp.protoB\xe4\x01\n)com.google.cloud.bigquery.datatransfer.v1B\x12ResourceStateProtoP\x01ZQgoogle.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1;datatransfer\xaa\x02%Google.Cloud.BigQuery.DataTransfer.V1\xca\x02%Google\\Cloud\\BigQuery\\DataTransfer\\V1b\x06proto3')
  ,
  dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 49.171429
| 440
| 0.825102
| 224
| 1,721
| 6.15625
| 0.392857
| 0.122553
| 0.235678
| 0.247281
| 0.486584
| 0.385787
| 0.333575
| 0.217549
| 0.095722
| 0
| 0
| 0.03525
| 0.06043
| 1,721
| 34
| 441
| 50.617647
| 0.817563
| 0.129576
| 0
| 0
| 1
| 0.111111
| 0.523458
| 0.515416
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
d73a731240f26cdeb5c028f750a4a33461c54a69
| 182
|
py
|
Python
|
tests/python/sample.py
|
Comcast/python-batch-runner
|
ec918f2cd62c1f4d26e52aa9bba5d08e15d107bd
|
[
"Apache-2.0"
] | 21
|
2019-07-03T18:01:16.000Z
|
2022-02-23T04:02:03.000Z
|
tests/python/sample.py
|
Othello1111/python-batch-runner
|
0b3355c2f95161c267dd4d5faa37e1b418e4f266
|
[
"Apache-2.0"
] | 11
|
2019-08-22T13:16:09.000Z
|
2022-02-22T21:48:49.000Z
|
tests/python/sample.py
|
Othello1111/python-batch-runner
|
0b3355c2f95161c267dd4d5faa37e1b418e4f266
|
[
"Apache-2.0"
] | 6
|
2020-10-07T16:43:50.000Z
|
2022-02-09T17:25:51.000Z
|
import time
from pyrunner import Worker
class SayHello(Worker):
    """Worker that logs a greeting and finishes without an error code."""

    def run(self):
        # Log via the Worker-provided logger; implicit None return means success.
        self.logger.info('Hello World!')
class FailMe(Worker):
    """Worker that always reports failure via a non-zero return code."""

    def run(self):
        failure_code = 1
        return failure_code
| 16.545455
| 36
| 0.697802
| 26
| 182
| 4.884615
| 0.653846
| 0.141732
| 0.188976
| 0.251969
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006849
| 0.197802
| 182
| 11
| 37
| 16.545455
| 0.863014
| 0
| 0
| 0.222222
| 0
| 0
| 0.065574
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.222222
| 0.111111
| 0.888889
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
d748141bf0f8701930f3837374ff7b436253f3cd
| 274
|
py
|
Python
|
setup.py
|
fegaris/social_networks_term_inspector
|
42ec3e1087b432689c6c88c1b745938cb486c77b
|
[
"Apache-2.0"
] | 1
|
2022-01-27T18:37:09.000Z
|
2022-01-27T18:37:09.000Z
|
setup.py
|
fegaris/social_networks_term_inspector
|
42ec3e1087b432689c6c88c1b745938cb486c77b
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
fegaris/social_networks_term_inspector
|
42ec3e1087b432689c6c88c1b745938cb486c77b
|
[
"Apache-2.0"
] | 2
|
2022-01-27T18:39:45.000Z
|
2022-01-27T19:38:15.000Z
|
from setuptools import find_packages, setup
# Package metadata for social_networks_term_inspector.
setup(
    name='social_networks_term_inspector',
    # Only ship the package itself (excludes tests and tooling directories).
    packages=find_packages(include=['social_networks_term_inspector']),
    version='0.1.0',
    description='social_networks_term_inspector',
    author='Me',
    # NOTE(review): declared license is MIT while the repository license
    # files appear to be Apache-2.0 — confirm which one is intended.
    license='MIT',
)
| 30.444444
| 71
| 0.744526
| 33
| 274
| 5.848485
| 0.606061
| 0.217617
| 0.279793
| 0.419689
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012605
| 0.131387
| 274
| 9
| 72
| 30.444444
| 0.798319
| 0
| 0
| 0
| 0
| 0
| 0.363636
| 0.327273
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d758daa89634a10d24a80b0232667829d8d8b29b
| 109
|
py
|
Python
|
brightway2/__init__.py
|
elinlucy/brightway2
|
dfda88177ae041533f4f071d1824365275919c00
|
[
"BSD-3-Clause"
] | null | null | null |
brightway2/__init__.py
|
elinlucy/brightway2
|
dfda88177ae041533f4f071d1824365275919c00
|
[
"BSD-3-Clause"
] | null | null | null |
brightway2/__init__.py
|
elinlucy/brightway2
|
dfda88177ae041533f4f071d1824365275919c00
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Aggregate the brightway2 meta-package API: re-export everything from the
# data, calculation and input/output sub-packages.
from bw2data import *
from bw2calc import *
from bw2io import *
# Meta-package version (major, minor).
__version__ = (2, 3)
| 15.571429
| 22
| 0.66055
| 15
| 109
| 4.533333
| 0.733333
| 0.294118
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 0.201835
| 109
| 6
| 23
| 18.166667
| 0.712644
| 0.183486
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d785d97a6e08b575e3da43fceec5390d430d6248
| 417
|
py
|
Python
|
Python3/Exercises/Unpacking/count_sevens.py
|
norbertosanchezdichi/TIL
|
2e9719ddd288022f53b094a42679e849bdbcc625
|
[
"MIT"
] | null | null | null |
Python3/Exercises/Unpacking/count_sevens.py
|
norbertosanchezdichi/TIL
|
2e9719ddd288022f53b094a42679e849bdbcc625
|
[
"MIT"
] | null | null | null |
Python3/Exercises/Unpacking/count_sevens.py
|
norbertosanchezdichi/TIL
|
2e9719ddd288022f53b094a42679e849bdbcc625
|
[
"MIT"
] | null | null | null |
def count_sevens(*args, target=7):
    """Count how many positional arguments equal *target*.

    Generalized: *target* is a keyword-only parameter defaulting to 7, so
    existing positional-only callers behave exactly as before.

    Returns the number of arguments comparing equal to *target* (0 when
    called with no arguments).
    """
    return args.count(target)
# Sample data: a long unsorted list of small integers containing several 7s.
nums = [90,1,35,67,89,20,3,1,2,3,4,5,6,9,34,46,57,68,79,12,23,34,55,1,90,54,34,76,8,23,34,45,56,67,78,12,23,34,45,56,67,768,23,4,5,6,7,8,9,12,34,14,15,16,17,11,7,11,8,4,6,2,5,8,7,10,12,13,14,15,7,8,7,7,345,23,34,45,56,67,1,7,3,6,7,2,3,4,5,6,7,8,9,8,7,6,5,4,2,1,2,3,4,5,6,7,8,9,0,9,8,7,8,7,6,5,4,3,2,1,7]
# Count sevens among explicitly listed arguments.
result1 = count_sevens(1, 4, 7)
# Unpack the whole list into positional arguments with the * operator.
result2 = count_sevens(*nums)
| 59.571429
| 303
| 0.618705
| 140
| 417
| 1.821429
| 0.321429
| 0.039216
| 0.047059
| 0.047059
| 0.270588
| 0.113725
| 0.062745
| 0.062745
| 0
| 0
| 0
| 0.453401
| 0.047962
| 417
| 7
| 304
| 59.571429
| 0.188917
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0.2
| 0.4
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
d78a9bd022afe918be830f2fd14d882ce858572e
| 45
|
py
|
Python
|
src/atcoder/abc140/e/sol_3.py
|
kagemeka/competitive-programming
|
c70fe481bcd518f507b885fc9234691d8ce63171
|
[
"MIT"
] | 1
|
2021-07-11T03:20:10.000Z
|
2021-07-11T03:20:10.000Z
|
src/atcoder/abc140/e/sol_3.py
|
kagemeka/competitive-programming
|
c70fe481bcd518f507b885fc9234691d8ce63171
|
[
"MIT"
] | 39
|
2021-07-10T05:21:09.000Z
|
2021-12-15T06:10:12.000Z
|
src/atcoder/abc140/e/sol_3.py
|
kagemeka/competitive-programming
|
c70fe481bcd518f507b885fc9234691d8ce63171
|
[
"MIT"
] | null | null | null |
# Set implemented with a binary search tree (or a self-balancing BST, "SBBST").
| 15
| 43
| 0.666667
| 7
| 45
| 4.285714
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.244444
| 45
| 2
| 44
| 22.5
| 0.882353
| 0.866667
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d795a90731be7d92f5c4e0ef24aa1f7faa1ff1ab
| 121
|
py
|
Python
|
django_settings_custom/__init__.py
|
ThomasMarques/django-settings-custom
|
3cc92c524ab6cc360de71053035eb801b3f3fbcf
|
[
"MIT"
] | 2
|
2019-03-03T13:36:13.000Z
|
2019-11-05T09:48:06.000Z
|
django_settings_custom/__init__.py
|
ThomasMarques/django-settings-custom
|
3cc92c524ab6cc360de71053035eb801b3f3fbcf
|
[
"MIT"
] | null | null | null |
django_settings_custom/__init__.py
|
ThomasMarques/django-settings-custom
|
3cc92c524ab6cc360de71053035eb801b3f3fbcf
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Main package of django_settings_custom."""
# Re-export the version metadata maintained in version.py.
from .version import __version__, __version_date__
| 30.25
| 50
| 0.735537
| 15
| 121
| 5.2
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009346
| 0.115702
| 121
| 3
| 51
| 40.333333
| 0.719626
| 0.512397
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ad05f06c7fe67ae1c89dddcafe9f7ce63c336b6d
| 17,418
|
py
|
Python
|
formlibrary/views.py
|
mercycorps/TolaWorkflow
|
59542132fafd611081adb0e8cfaa04abc5886d7a
|
[
"Apache-2.0"
] | null | null | null |
formlibrary/views.py
|
mercycorps/TolaWorkflow
|
59542132fafd611081adb0e8cfaa04abc5886d7a
|
[
"Apache-2.0"
] | null | null | null |
formlibrary/views.py
|
mercycorps/TolaWorkflow
|
59542132fafd611081adb0e8cfaa04abc5886d7a
|
[
"Apache-2.0"
] | null | null | null |
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView
from tola_management.permissions import has_projects_access
from .models import TrainingAttendance, Beneficiary, Distribution
from django.core.urlresolvers import reverse_lazy
from .forms import TrainingAttendanceForm, BeneficiaryForm, DistributionForm
from workflow.models import FormGuidance, Program, ProjectAgreement
from django.utils.decorators import method_decorator
from tola.util import getCountry, group_excluded
from django.shortcuts import render
from django.contrib import messages
from django.db.models import Q
from django.http import HttpResponseRedirect, JsonResponse
from django.views.generic.detail import View
from mixins import AjaxableResponseMixin
import json
from django.core.serializers.json import DjangoJSONEncoder
@method_decorator(has_projects_access, name='dispatch')
class TrainingList(ListView):
    """
    List trainings for one project agreement, or every training in the
    user's countries when the ``pk`` URL kwarg is 0.
    """
    model = TrainingAttendance
    template_name = 'formlibrary/training_list.html'
    def get(self, request, *args, **kwargs):
        project_agreement_id = self.kwargs['pk']
        countries = getCountry(request.user)
        # Funded programs in the user's countries, used for the filter dropdown.
        getPrograms = Program.objects.all().filter(funding_status="Funded", country__in=countries).distinct()
        if int(self.kwargs['pk']) == 0:
            # pk == 0 is the "all projects" sentinel.
            getTraining = TrainingAttendance.objects.all().filter(program__country__in=countries)
        else:
            getTraining = TrainingAttendance.objects.all().filter(project_agreement_id=self.kwargs['pk'])
        return render(request, self.template_name, {'getTraining': getTraining, 'project_agreement_id': project_agreement_id, 'getPrograms': getPrograms})
@method_decorator(has_projects_access, name='dispatch')
class TrainingCreate(CreateView):
    """
    Create a TrainingAttendance record, pre-bound to the project agreement
    given by the ``id`` URL kwarg.
    """
    model = TrainingAttendance
    @method_decorator(group_excluded('ViewOnly', url='workflow/permission'))
    def dispatch(self, request, *args, **kwargs):
        # Attach the optional form guidance text for the template, if configured.
        try:
            self.guidance = FormGuidance.objects.get(form="Training")
        except FormGuidance.DoesNotExist:
            self.guidance = None
        return super(TrainingCreate, self).dispatch(request, *args, **kwargs)
    # add the request to the kwargs
    def get_form_kwargs(self):
        kwargs = super(TrainingCreate, self).get_form_kwargs()
        kwargs['request'] = self.request
        return kwargs
    def get_initial(self):
        initial = {
            'agreement': self.kwargs['id'],
        }
        return initial
    def form_invalid(self, form):
        messages.error(self.request, 'Invalid Form', fail_silently=False)
        return self.render_to_response(self.get_context_data(form=form))
    def form_valid(self, form):
        form.save()
        messages.success(self.request, 'Success, Training Created!')
        # NOTE(review): latest('id') assumes the just-saved row is the newest —
        # racy under concurrent creates; confirm this is acceptable.
        latest = TrainingAttendance.objects.latest('id')
        redirect_url = '/formlibrary/training_update/' + str(latest.id)
        return HttpResponseRedirect(redirect_url)
    form_class = TrainingAttendanceForm
@method_decorator(has_projects_access, name='dispatch')
class TrainingUpdate(UpdateView):
    """
    Edit an existing TrainingAttendance record.
    """
    model = TrainingAttendance
    @method_decorator(group_excluded('ViewOnly', url='workflow/permission'))
    def dispatch(self, request, *args, **kwargs):
        # Attach the optional form guidance text for the template, if configured.
        try:
            self.guidance = FormGuidance.objects.get(form="Training")
        except FormGuidance.DoesNotExist:
            self.guidance = None
        return super(TrainingUpdate, self).dispatch(request, *args, **kwargs)
    # add the request to the kwargs
    def get_form_kwargs(self):
        kwargs = super(TrainingUpdate, self).get_form_kwargs()
        kwargs['request'] = self.request
        return kwargs
    def form_invalid(self, form):
        messages.error(self.request, 'Invalid Form', fail_silently=False)
        return self.render_to_response(self.get_context_data(form=form))
    def form_valid(self, form):
        form.save()
        messages.success(self.request, 'Success, Training Updated!')
        # Re-render the same form (no redirect) after a successful save.
        return self.render_to_response(self.get_context_data(form=form))
    form_class = TrainingAttendanceForm
@method_decorator(has_projects_access, name='dispatch')
class TrainingDelete(DeleteView):
    """
    Confirm-and-delete view for a TrainingAttendance record.
    """
    model = TrainingAttendance
    success_url = '/formlibrary/training_list/0/'
    template_name = 'formlibrary/training_confirm_delete.html'
    def form_invalid(self, form):
        messages.error(self.request, 'Invalid Form', fail_silently=False)
        return self.render_to_response(self.get_context_data(form=form))
    # NOTE(review): form_valid here saves the form rather than deleting, and
    # classic DeleteView does not call form_valid at all — verify deletion
    # actually happens via DeleteView.delete().
    def form_valid(self, form):
        form.save()
        messages.success(self.request, 'Success, Training Deleted!')
        return self.render_to_response(self.get_context_data(form=form))
    form_class = TrainingAttendanceForm
@method_decorator(has_projects_access, name='dispatch')
class BeneficiaryList(ListView):
    """
    List beneficiaries for one training (``pk``), or all beneficiaries tied
    to trainings/distributions in the user's countries when pk == 0.
    """
    model = Beneficiary
    template_name = 'formlibrary/beneficiary_list.html'
    def get(self, request, *args, **kwargs):
        project_agreement_id = self.kwargs['pk']
        countries = getCountry(request.user)
        getPrograms = Program.objects.all().filter(funding_status="Funded", country__in=countries).distinct()
        if int(self.kwargs['pk']) == 0:
            # Beneficiaries reachable through either a training or a distribution.
            getBeneficiaries = Beneficiary.objects.all().filter(Q(training__program__country__in=countries) | Q(distribution__program__country__in=countries) )
        else:
            getBeneficiaries = Beneficiary.objects.all().filter(training__id=self.kwargs['pk'])
        return render(request, self.template_name, {'getBeneficiaries': getBeneficiaries, 'project_agreement_id': project_agreement_id, 'getPrograms': getPrograms})
@method_decorator(has_projects_access, name='dispatch')
class BeneficiaryCreate(CreateView):
    """
    Create a Beneficiary record, pre-bound to the training given by the
    ``id`` URL kwarg.
    """
    model = Beneficiary
    @method_decorator(group_excluded('ViewOnly', url='workflow/permission'))
    def dispatch(self, request, *args, **kwargs):
        # Attach the optional form guidance text for the template, if configured.
        try:
            self.guidance = FormGuidance.objects.get(form="Beneficiary")
        except FormGuidance.DoesNotExist:
            self.guidance = None
        return super(BeneficiaryCreate, self).dispatch(request, *args, **kwargs)
    def get_initial(self):
        initial = {
            'training': self.kwargs['id'],
        }
        return initial
    # add the request to the kwargs
    def get_form_kwargs(self):
        kwargs = super(BeneficiaryCreate, self).get_form_kwargs()
        kwargs['request'] = self.request
        return kwargs
    def form_invalid(self, form):
        messages.error(self.request, 'Invalid Form', fail_silently=False)
        return self.render_to_response(self.get_context_data(form=form))
    def form_valid(self, form):
        form.save()
        messages.success(self.request, 'Success, Beneficiary Created!')
        # NOTE(review): latest('id') assumes the just-saved row is the newest —
        # racy under concurrent creates; confirm this is acceptable.
        latest = Beneficiary.objects.latest('id')
        redirect_url = '/formlibrary/beneficiary_update/' + str(latest.id)
        return HttpResponseRedirect(redirect_url)
    form_class = BeneficiaryForm
@method_decorator(has_projects_access, name='dispatch')
class BeneficiaryUpdate(UpdateView):
    """
    Edit an existing Beneficiary record.
    (Docstring previously said "Training Form" — copy/paste artifact.)
    """
    model = Beneficiary
    @method_decorator(group_excluded('ViewOnly', url='workflow/permission'))
    def dispatch(self, request, *args, **kwargs):
        # Attach the optional form guidance text for the template, if configured.
        try:
            self.guidance = FormGuidance.objects.get(form="Beneficiary")
        except FormGuidance.DoesNotExist:
            self.guidance = None
        return super(BeneficiaryUpdate, self).dispatch(request, *args, **kwargs)
    # add the request to the kwargs
    def get_form_kwargs(self):
        kwargs = super(BeneficiaryUpdate, self).get_form_kwargs()
        kwargs['request'] = self.request
        return kwargs
    def form_invalid(self, form):
        messages.error(self.request, 'Invalid Form', fail_silently=False)
        return self.render_to_response(self.get_context_data(form=form))
    def form_valid(self, form):
        form.save()
        messages.success(self.request, 'Success, Beneficiary Updated!')
        return self.render_to_response(self.get_context_data(form=form))
    form_class = BeneficiaryForm
@method_decorator(has_projects_access, name='dispatch')
class BeneficiaryDelete(DeleteView):
    """
    Confirm-and-delete view for a Beneficiary record.
    """
    model = Beneficiary
    success_url = reverse_lazy('beneficiary_list')
    @method_decorator(group_excluded('ViewOnly', url='workflow/permission'))
    def dispatch(self, request, *args, **kwargs):
        return super(BeneficiaryDelete, self).dispatch(request, *args, **kwargs)
    def form_invalid(self, form):
        messages.error(self.request, 'Invalid Form', fail_silently=False)
        return self.render_to_response(self.get_context_data(form=form))
    # NOTE(review): form_valid saves rather than deletes, and classic
    # DeleteView does not call form_valid — verify deletion path.
    def form_valid(self, form):
        form.save()
        messages.success(self.request, 'Success, Beneficiary Deleted!')
        return self.render_to_response(self.get_context_data(form=form))
    form_class = BeneficiaryForm
@method_decorator(has_projects_access, name='dispatch')
class DistributionList(ListView):
    """
    List distributions for one program (``pk``), or every distribution in
    the user's countries when pk == 0.
    """
    model = Distribution
    template_name = 'formlibrary/distribution_list.html'
    def get(self, request, *args, **kwargs):
        program_id = self.kwargs['pk']
        countries = getCountry(request.user)
        getPrograms = Program.objects.all().filter(funding_status="Funded", country__in=countries).distinct()
        if int(self.kwargs['pk']) == 0:
            getDistribution = Distribution.objects.all().filter(program__country__in=countries)
        else:
            getDistribution = Distribution.objects.all().filter(program_id=self.kwargs['pk'])
        return render(request, self.template_name, {'getDistribution': getDistribution, 'program_id': program_id, 'getPrograms': getPrograms})
@method_decorator(has_projects_access, name='dispatch')
class DistributionCreate(CreateView):
    """
    Create a Distribution record, pre-bound to the program given by the
    ``id`` URL kwarg.
    """
    model = Distribution
    @method_decorator(group_excluded('ViewOnly', url='workflow/permission'))
    def dispatch(self, request, *args, **kwargs):
        # Attach the optional form guidance text for the template, if configured.
        try:
            self.guidance = FormGuidance.objects.get(form="Distribution")
        except FormGuidance.DoesNotExist:
            self.guidance = None
        return super(DistributionCreate, self).dispatch(request, *args, **kwargs)
    # add the request to the kwargs
    def get_form_kwargs(self):
        kwargs = super(DistributionCreate, self).get_form_kwargs()
        kwargs['request'] = self.request
        return kwargs
    def get_initial(self):
        initial = {
            'program': self.kwargs['id']
        }
        return initial
    def form_invalid(self, form):
        messages.error(self.request, 'Invalid Form', fail_silently=False)
        return self.render_to_response(self.get_context_data(form=form))
    def form_valid(self, form):
        form.save()
        messages.success(self.request, 'Success, Distribution Created!')
        # NOTE(review): latest('id') assumes the just-saved row is the newest —
        # racy under concurrent creates; confirm this is acceptable.
        latest = Distribution.objects.latest('id')
        redirect_url = '/formlibrary/distribution_update/' + str(latest.id)
        return HttpResponseRedirect(redirect_url)
    form_class = DistributionForm
@method_decorator(has_projects_access, name='dispatch')
class DistributionUpdate(UpdateView):
    """
    Edit an existing Distribution record.
    """
    model = Distribution
    @method_decorator(group_excluded('ViewOnly', url='workflow/permission'))
    def dispatch(self, request, *args, **kwargs):
        # Attach the optional form guidance text for the template, if configured.
        try:
            self.guidance = FormGuidance.objects.get(form="Distribution")
        except FormGuidance.DoesNotExist:
            self.guidance = None
        return super(DistributionUpdate, self).dispatch(request, *args, **kwargs)
    # add the request to the kwargs
    def get_form_kwargs(self):
        kwargs = super(DistributionUpdate, self).get_form_kwargs()
        kwargs['request'] = self.request
        return kwargs
    def form_invalid(self, form):
        messages.error(self.request, 'Invalid Form', fail_silently=False)
        return self.render_to_response(self.get_context_data(form=form))
    def form_valid(self, form):
        form.save()
        messages.success(self.request, 'Success, Distribution Updated!')
        return self.render_to_response(self.get_context_data(form=form))
    form_class = DistributionForm
@method_decorator(has_projects_access, name='dispatch')
class DistributionDelete(DeleteView):
    """
    Confirm-and-delete view for a Distribution record.
    """
    model = Distribution
    success_url = '/formlibrary/distribution_list/0/'
    template_name = 'formlibrary/distribution_confirm_delete.html'
    def form_invalid(self, form):
        messages.error(self.request, 'Invalid Form', fail_silently=False)
        return self.render_to_response(self.get_context_data(form=form))
    # NOTE(review): form_valid saves rather than deletes, and classic
    # DeleteView does not call form_valid — verify deletion path.
    def form_valid(self, form):
        form.save()
        messages.success(self.request, 'Success, Distribution Deleted!')
        return self.render_to_response(self.get_context_data(form=form))
    form_class = DistributionForm
#Ajax views for ajax filters and paginators
@method_decorator(has_projects_access, name='dispatch')
class TrainingListObjects(View, AjaxableResponseMixin):
    """
    JSON endpoint backing the ajax training-list filter/paginator.

    Returns trainings scoped to the user's countries (program == 0), to a
    program (project == 0), or to a specific program + project agreement.
    """
    def get(self, request, *args, **kwargs):
        program_id = int(self.kwargs['program'])
        project_id = int(self.kwargs['project'])
        countries = getCountry(request.user)
        # Fix: removed leftover debug statement `print project_id` (Python-2-only
        # syntax, wrote to stdout on every request). Also reuse the already
        # parsed program_id instead of re-reading/re-casting the kwarg.
        if program_id == 0:
            getTraining = TrainingAttendance.objects.all().filter(program__country__in=countries).values('id', 'create_date', 'training_name', 'project_agreement__project_name')
        elif program_id != 0 and project_id == 0:
            getTraining = TrainingAttendance.objects.all().filter(program=program_id).values('id', 'create_date', 'training_name', 'project_agreement__project_name')
        else:
            getTraining = TrainingAttendance.objects.all().filter(program_id=program_id, project_agreement_id=project_id).values('id', 'create_date', 'training_name', 'project_agreement__project_name')
        getTraining = json.dumps(list(getTraining), cls=DjangoJSONEncoder)
        final_dict = {'getTraining': getTraining}
        return JsonResponse(final_dict, safe=False)
@method_decorator(has_projects_access, name='dispatch')
class BeneficiaryListObjects(View, AjaxableResponseMixin):
    """
    JSON endpoint backing the ajax beneficiary-list filter/paginator.
    Scope narrows from all-in-countries (program == 0) to program to
    program + project agreement.
    """
    def get(self, request, *args, **kwargs):
        program_id = int(self.kwargs['program'])
        project_id = int(self.kwargs['project'])
        countries = getCountry(request.user)
        if program_id == 0:
            # Beneficiaries reachable through either a training or a distribution.
            getBeneficiaries = Beneficiary.objects.all().filter(Q(training__program__country__in=countries) | Q(distribution__program__country__in=countries) ).values('id', 'beneficiary_name', 'create_date')
        elif program_id !=0 and project_id == 0:
            getBeneficiaries = Beneficiary.objects.all().filter(program__id=program_id).values('id', 'beneficiary_name', 'create_date')
        else:
            getBeneficiaries = Beneficiary.objects.all().filter(program__id=program_id, training__project_agreement=project_id).values('id', 'beneficiary_name', 'create_date')
        getBeneficiaries = json.dumps(list(getBeneficiaries), cls=DjangoJSONEncoder)
        final_dict = {'getBeneficiaries': getBeneficiaries}
        return JsonResponse(final_dict, safe=False)
@method_decorator(has_projects_access, name='dispatch')
class DistributionListObjects(View, AjaxableResponseMixin):
    """
    JSON endpoint backing the ajax distribution-list filter/paginator.
    Scope narrows from all-in-countries (program == 0) to program to
    program + initiation (project).
    """
    def get(self, request, *args, **kwargs):
        program_id = int(self.kwargs['program'])
        project_id = int(self.kwargs['project'])
        countries = getCountry(request.user)
        if program_id == 0:
            getDistribution = Distribution.objects.all().filter(program__country__in=countries).values('id', 'distribution_name', 'create_date', 'program')
        elif program_id !=0 and project_id == 0:
            getDistribution = Distribution.objects.all().filter(program_id=program_id).values('id', 'distribution_name', 'create_date', 'program')
        else:
            getDistribution = Distribution.objects.all().filter(program_id=program_id, initiation_id=project_id).values('id', 'distribution_name', 'create_date', 'program')
        getDistribution = json.dumps(list(getDistribution), cls=DjangoJSONEncoder)
        final_dict = {'getDistribution': getDistribution}
        return JsonResponse(final_dict, safe=False)
#program and project & training filters
@method_decorator(has_projects_access, name='dispatch')
class GetAgreements(View, AjaxableResponseMixin):
    """
    JSON endpoint: project agreements for a program, used by the cascading
    program -> project filter dropdowns.

    Returns ``{"getAgreements": <json list>}`` or ``{}`` when the program
    has no agreements / no program was selected.
    """
    def get(self, request, *args, **kwargs):
        program_id = self.kwargs['program']
        countries = getCountry(request.user)
        # Fix: getAgreements was only bound inside the `if` branch, so the
        # `if getAgreements:` test below raised NameError (UnboundLocalError)
        # whenever the branch was skipped. Initialize it explicitly.
        getAgreements = None
        if program_id != 0:
            getAgreements = ProjectAgreement.objects.all().filter(program = program_id).values('id', 'project_name')
        final_dict = {}
        if getAgreements:
            getAgreements = json.dumps(list(getAgreements), cls=DjangoJSONEncoder)
            final_dict = {'getAgreements': getAgreements}
        return JsonResponse(final_dict, safe=False)
| 35.330629
| 207
| 0.69698
| 1,890
| 17,418
| 6.226984
| 0.086243
| 0.035517
| 0.030334
| 0.035347
| 0.774237
| 0.763106
| 0.725975
| 0.71272
| 0.671425
| 0.648653
| 0
| 0.001066
| 0.192158
| 17,418
| 492
| 208
| 35.402439
| 0.835335
| 0.01487
| 0
| 0.645161
| 0
| 0
| 0.108972
| 0.025633
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.003226
| 0.054839
| null | null | 0.003226
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ad29c5e83e977db31fd6cea4b47218bae2db1c25
| 190
|
py
|
Python
|
mlfinlab/filters/__init__.py
|
scibol/mlfinlab
|
3c80f269bc68b8cb9bcf863ceb3dc77fc14b6984
|
[
"BSD-3-Clause"
] | 8
|
2020-04-19T08:09:34.000Z
|
2022-03-30T20:49:40.000Z
|
mlfinlab/filters/__init__.py
|
scibol/mlfinlab
|
3c80f269bc68b8cb9bcf863ceb3dc77fc14b6984
|
[
"BSD-3-Clause"
] | 1
|
2019-07-24T17:52:30.000Z
|
2019-07-24T17:52:30.000Z
|
mlfinlab/filters/__init__.py
|
scibol/mlfinlab
|
3c80f269bc68b8cb9bcf863ceb3dc77fc14b6984
|
[
"BSD-3-Clause"
] | 8
|
2020-08-09T02:25:04.000Z
|
2022-03-20T15:08:11.000Z
|
"""
Logic regarding the various types of filters:

* CUSUM Filter
* Z-score filter
"""
# Re-export the public filter functions at the sub-package level.
from mlfinlab.filters.filters import cusum_filter
from mlfinlab.filters.filters import z_score_filter
| 19
| 51
| 0.794737
| 27
| 190
| 5.481481
| 0.518519
| 0.148649
| 0.162162
| 0.337838
| 0.513514
| 0.513514
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 190
| 9
| 52
| 21.111111
| 0.89697
| 0.410526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ad3074034fd8f8e01aada730d671298cb593240e
| 162
|
py
|
Python
|
libunittest/whatever/SomeApi.py
|
randomsilo/python-import-sample
|
128a9126d6506905bc0fa2bce69c9c83a427fe63
|
[
"MIT"
] | null | null | null |
libunittest/whatever/SomeApi.py
|
randomsilo/python-import-sample
|
128a9126d6506905bc0fa2bce69c9c83a427fe63
|
[
"MIT"
] | null | null | null |
libunittest/whatever/SomeApi.py
|
randomsilo/python-import-sample
|
128a9126d6506905bc0fa2bce69c9c83a427fe63
|
[
"MIT"
] | null | null | null |
class SomeCls():
    """Minimal sample class exercised by the import-layout unit tests."""

    def __init__(self):
        """No state to initialise."""

    def returns_true(self):
        """Always return ``True``."""
        return True
| 12.461538
| 28
| 0.518519
| 16
| 162
| 4.875
| 0.5625
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.351852
| 162
| 12
| 29
| 13.5
| 0.742857
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.2
| 0
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
ad3e5810e940a3f5e7259e5ac4ab0b8639dd9b0b
| 91
|
py
|
Python
|
accounts/admin.py
|
ProjectFFF/FFF
|
a563e2bb5aafe18d3fa3143d83b6558921eac8ee
|
[
"BSD-2-Clause"
] | 6
|
2020-09-02T18:48:28.000Z
|
2022-02-06T11:13:06.000Z
|
accounts/admin.py
|
ProjectFFF/FFF
|
a563e2bb5aafe18d3fa3143d83b6558921eac8ee
|
[
"BSD-2-Clause"
] | 23
|
2020-09-04T08:57:28.000Z
|
2020-10-25T07:03:47.000Z
|
accounts/admin.py
|
ProjectFFF/FFF
|
a563e2bb5aafe18d3fa3143d83b6558921eac8ee
|
[
"BSD-2-Clause"
] | null | null | null |
from django.contrib import admin
from .models import Member
# Expose Member in the Django admin with the default ModelAdmin options.
admin.site.register(Member)
| 13
| 32
| 0.802198
| 13
| 91
| 5.615385
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131868
| 91
| 7
| 33
| 13
| 0.924051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ad4711b204ec661dee1fd904e31bcb8cfed0eb02
| 196
|
py
|
Python
|
main/PluginDemos/viscosity/Simulation/viscosityDemo.py
|
JulianoGianlupi/nh-cc3d-4x-base-tool
|
c0f4aceebd4c5bf3ec39e831ef851e419b161259
|
[
"CC0-1.0"
] | null | null | null |
main/PluginDemos/viscosity/Simulation/viscosityDemo.py
|
JulianoGianlupi/nh-cc3d-4x-base-tool
|
c0f4aceebd4c5bf3ec39e831ef851e419b161259
|
[
"CC0-1.0"
] | null | null | null |
main/PluginDemos/viscosity/Simulation/viscosityDemo.py
|
JulianoGianlupi/nh-cc3d-4x-base-tool
|
c0f4aceebd4c5bf3ec39e831ef851e419b161259
|
[
"CC0-1.0"
] | 1
|
2021-02-26T21:50:29.000Z
|
2021-02-26T21:50:29.000Z
|
from cc3d import CompuCellSetup
from viscosityDemoSteppables import viscosityDemoSteppable
# Register the steppable to run every Monte Carlo step (frequency=1),
# then start the CompuCell3D simulation main loop.
CompuCellSetup.register_steppable(steppable=viscosityDemoSteppable(frequency=1))
CompuCellSetup.run()
| 24.5
| 80
| 0.882653
| 17
| 196
| 10.117647
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010929
| 0.066327
| 196
| 7
| 81
| 28
| 0.928962
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ad96f97325f83f3e2e7f5a2d218eb8a5347cd466
| 74
|
py
|
Python
|
sysconfig.py
|
YakDriver/kort-minnesvard
|
e082981c078440e8a6a5491ad0ef1c4a6b643c62
|
[
"Apache-2.0"
] | null | null | null |
sysconfig.py
|
YakDriver/kort-minnesvard
|
e082981c078440e8a6a5491ad0ef1c4a6b643c62
|
[
"Apache-2.0"
] | null | null | null |
sysconfig.py
|
YakDriver/kort-minnesvard
|
e082981c078440e8a6a5491ad0ef1c4a6b643c62
|
[
"Apache-2.0"
] | null | null | null |
# Print the ./configure arguments this Python interpreter was built with.
# Fix: `import distutils` does not import the `sysconfig` submodule, so the
# attribute access below could raise AttributeError; import it explicitly.
import distutils.sysconfig

# NOTE(review): distutils is deprecated (removed in Python 3.12); the stdlib
# `sysconfig.get_config_var` is the forward-compatible replacement.
print(distutils.sysconfig.get_config_var('CONFIG_ARGS'))
| 24.666667
| 56
| 0.851351
| 10
| 74
| 6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040541
| 74
| 3
| 56
| 24.666667
| 0.84507
| 0
| 0
| 0
| 0
| 0
| 0.146667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
0f017545fb073a3d1c550ebc6c46014839f663c5
| 54
|
py
|
Python
|
gcf_dev_gen/gcf_dev_gen.py
|
aitaro/gcf_dev_gen
|
d3d13f559d0cfe8bb23c03d5efce1da99534e02f
|
[
"MIT"
] | null | null | null |
gcf_dev_gen/gcf_dev_gen.py
|
aitaro/gcf_dev_gen
|
d3d13f559d0cfe8bb23c03d5efce1da99534e02f
|
[
"MIT"
] | null | null | null |
gcf_dev_gen/gcf_dev_gen.py
|
aitaro/gcf_dev_gen
|
d3d13f559d0cfe8bb23c03d5efce1da99534e02f
|
[
"MIT"
] | null | null | null |
"""Main module."""
def main():
    """Entry point: emit the literal marker 'package' on stdout."""
    message = 'package'
    print(message)
| 9
| 20
| 0.537037
| 6
| 54
| 4.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.203704
| 54
| 5
| 21
| 10.8
| 0.674419
| 0.222222
| 0
| 0
| 0
| 0
| 0.194444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
0f590e1d9fee82930e8ca69c057be70655dfeb55
| 194
|
py
|
Python
|
src/checker/__main__.py
|
el-yurchito/home-assignment
|
9995187aab3ceef0389c5635d13c6cc78560fd07
|
[
"MIT"
] | null | null | null |
src/checker/__main__.py
|
el-yurchito/home-assignment
|
9995187aab3ceef0389c5635d13c6cc78560fd07
|
[
"MIT"
] | null | null | null |
src/checker/__main__.py
|
el-yurchito/home-assignment
|
9995187aab3ceef0389c5635d13c6cc78560fd07
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Entry point for the checker service: build a Worker from the configured
# JSON file and run it.
import os
from checker.worker import Worker
# Config path comes from the CHECKER_CONFIG env var, falling back to a path
# relative to the current working directory.
config_file_path = os.environ.get("CHECKER_CONFIG", "./checker/config.json")
worker = Worker(config_file_path)
worker.run()
| 21.555556
| 76
| 0.737113
| 28
| 194
| 4.928571
| 0.535714
| 0.173913
| 0.231884
| 0.289855
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00578
| 0.108247
| 194
| 8
| 77
| 24.25
| 0.791908
| 0.108247
| 0
| 0
| 0
| 0
| 0.204678
| 0.122807
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0f612f14cfe86c2a0d779a227bd105583d282453
| 171
|
py
|
Python
|
pygenetics/__init__.py
|
tjkessler/PyGenetics
|
8cd05e0a58a29fe93063393850daccf9d5ad6bf4
|
[
"MIT"
] | 2
|
2018-07-26T12:59:46.000Z
|
2019-08-20T07:49:52.000Z
|
pygenetics/__init__.py
|
tjkessler/PyGenetics
|
8cd05e0a58a29fe93063393850daccf9d5ad6bf4
|
[
"MIT"
] | null | null | null |
pygenetics/__init__.py
|
tjkessler/PyGenetics
|
8cd05e0a58a29fe93063393850daccf9d5ad6bf4
|
[
"MIT"
] | 1
|
2021-01-12T08:55:11.000Z
|
2021-01-12T08:55:11.000Z
|
# Public API re-exports for the pygenetics package.
from pygenetics.population import Population
from pygenetics.member import Member
from pygenetics.parameter import Parameter
import pygenetics.utils
# Single-source package version string.
__version__ = '1.0.0'
| 28.5
| 44
| 0.847953
| 22
| 171
| 6.409091
| 0.454545
| 0.297872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019481
| 0.099415
| 171
| 5
| 45
| 34.2
| 0.896104
| 0
| 0
| 0
| 0
| 0
| 0.02924
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0f678abee6a40b81a0f1d23d7b027537de238384
| 216
|
py
|
Python
|
multiscalegnn/models/__init__.py
|
qlinhta/MultiScaleGNN
|
97dc50e05d484b61eabe62a010a1669b2d20de52
|
[
"MIT"
] | 31
|
2018-03-25T01:31:36.000Z
|
2022-01-05T10:03:02.000Z
|
multiscalegnn/models/__init__.py
|
qlinhta/MultiScaleGNN
|
97dc50e05d484b61eabe62a010a1669b2d20de52
|
[
"MIT"
] | null | null | null |
multiscalegnn/models/__init__.py
|
qlinhta/MultiScaleGNN
|
97dc50e05d484b61eabe62a010a1669b2d20de52
|
[
"MIT"
] | 10
|
2018-02-14T20:06:59.000Z
|
2020-12-28T10:45:25.000Z
|
# Python 2/3 compatibility shims (module predates Python-3-only support).
from __future__ import print_function
from __future__ import division
# Core GNN model variants exported by this subpackage.
from .models import GNNModular, IndexModule, GNNMultiClass, GNNAtomic, GMul2
# NOTE(review): wildcard imports re-export whatever these submodules make
# public; the exact surface depends on their contents / __all__.
from .utils import *
from .snapload import *
from .permute4 import *
| 30.857143
| 76
| 0.819444
| 26
| 216
| 6.461538
| 0.576923
| 0.119048
| 0.190476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010638
| 0.12963
| 216
| 6
| 77
| 36
| 0.882979
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.166667
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0f6a96695be42c780df7e54f3aa7e1382c54ceac
| 13,880
|
py
|
Python
|
src/Deprecated/Alg_nD.py
|
somu15/Small_Pf_code
|
35f3d28faab2aa80f2332499f5e7ab19b040eabe
|
[
"MIT"
] | null | null | null |
src/Deprecated/Alg_nD.py
|
somu15/Small_Pf_code
|
35f3d28faab2aa80f2332499f5e7ab19b040eabe
|
[
"MIT"
] | null | null | null |
src/Deprecated/Alg_nD.py
|
somu15/Small_Pf_code
|
35f3d28faab2aa80f2332499f5e7ab19b040eabe
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 3 23:10:37 2020
@author: dhulls
"""
from os import sys
import pathlib
import numpy as np
import pandas as pd
import seaborn as sns
import random
from scipy.stats import lognorm
from scipy.stats import norm
from scipy.stats import rayleigh
from scipy.stats import uniform
from scipy.stats import cauchy
import matplotlib.pyplot as plt
from UQpy.SampleMethods import MH
from UQpy.Distributions import Distribution
import time
from UQpy.Distributions import Normal
from UQpy.SampleMethods import MMH
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
tfk = tfp.math.psd_kernels
tf.enable_v2_behavior()
from LimitStateFunctions import LimitStateFunctions as LSF
from ML_TF import ML_TF
from DrawRandom import DrawRandom as DR
from pyDOE import *
# Input dimensionality of the borehole limit-state function.
Ndim = 8
# Response threshold used as the final subset-simulation level (see
# y1_lim[Nlim-1] = value below).
value = 250
# Limit-state function provider and random-input sampler (project classes).
LS1 = LSF()
DR1 = DR()
# Number of posterior samples drawn per GP prediction.
num_s = 500
## Training GP
def Norm1(X1, X):
    """Standardize X1 by the column-wise mean and std of the reference set X."""
    mu = np.mean(X, axis=0)
    sigma = np.std(X, axis=0)
    return (X1 - mu) / sigma
def Norm2(X1, X):
    """Standardize X1 against X (same transform as Norm1, kept separate for outputs)."""
    centered = X1 - np.mean(X, axis=0)
    return centered / np.std(X, axis=0)
# def InvNorm1(X1,X):
# return X1 # (X1*np.std(X,axis=0)+np.mean(X,axis=0))
def InvNorm2(X1, X):
    """Undo Norm2: map standardized values X1 back to the scale of X."""
    scale = np.std(X, axis=0)
    shift = np.mean(X, axis=0)
    return X1 * scale + shift
# Stage 1: fit a low-fidelity GP surrogate (ML0) to 50 LHS samples of the
# high-fidelity borehole response, in normalized input/output space.
Ninit_GP = 50
lhd = DR1.BoreholeLHS(Ninit_GP) # uniform(loc=-3.5,scale=7.0).ppf(lhd0) #
inp_LFtrain = lhd
y_HF_LFtrain = LS1.Scalar_Borehole_HF_nD(inp_LFtrain)
ML0 = ML_TF(obs_ind = Norm1(inp_LFtrain,inp_LFtrain), obs = Norm2(y_HF_LFtrain,y_HF_LFtrain), amp_init=1.0, len_init=1.0, var_init=1.0, num_iters = 1000)
amp0, len0, var0 = ML0.GP_train()
# Stage 2: fit a discrepancy GP (ML) to HF - LF residuals at 12 fresh
# LHS points; the LF prediction is the ML0 posterior mean, mapped back to
# the physical output scale via InvNorm2.
Ninit_GP = 12
lhd = DR1.BoreholeLHS(Ninit_GP)
inp_GPtrain = lhd
samples0 = ML0.GP_predict(amplitude_var = amp0, length_scale_var=len0, observation_noise_variance_var=var0, pred_ind = Norm1(inp_GPtrain,inp_LFtrain), num_samples=num_s)
y_LF_GP = np.array(InvNorm2(np.mean(np.array(samples0),axis=0),y_HF_LFtrain))
y_HF_GP = np.array((LS1.Scalar_Borehole_HF_nD(inp_GPtrain)))
y_GPtrain = y_HF_GP - y_LF_GP
ML = ML_TF(obs_ind = Norm1(inp_GPtrain,inp_GPtrain), obs = y_GPtrain, amp_init=1., len_init=1., var_init=1., num_iters = 1000)
amp1, len1, var1 = ML.GP_train()
# GP re-training iterations used inside the subset-simulation loops below.
Iters = 300
# y_HF_LFtrain = np.empty(1, dtype = float)
# inp_LFtrain = np.empty([1,Ndim], dtype = float)
# for ii in np.arange(0,Ninit_GP,1):
# inp = lhd[ii,:].reshape(Ndim)
# inpp = inp[None, :]
# inp_LFtrain = np.concatenate((inp_LFtrain, inp.reshape(1,Ndim)))
# y_HF_LFtrain = np.concatenate((y_HF_LFtrain, np.array((LS1.Scalar_Borehole_HF_nD(inpp))).reshape(1)))
# inp_LFtrain = np.delete(inp_LFtrain, 0, 0)
# y_HF_LFtrain = np.delete(y_HF_LFtrain, 0)
# Iters = 300
# lhd = DR1.BoreholeLHS(200)
# y_LF_GP = np.empty(1, dtype = float)
# y_HF_GP = np.empty(1, dtype = float)
# inp_GPtrain = np.empty([1,Ndim], dtype = float)
# y_GPtrain = np.empty(1, dtype = float)
# Ninit_GP = 12
# for ii in np.arange(0,Ninit_GP,1):
# inp = lhd[ii,:].reshape(Ndim)
# inpp = inp[None, :]
# inp_GPtrain = np.concatenate((inp_GPtrain, inp.reshape(1,Ndim)))
# samples0 = ML0.GP_predict(amplitude_var = amp0, length_scale_var=len0, observation_noise_variance_var=var0, pred_ind = Norm1(inpp,inp_LFtrain), num_samples=num_s)
# y_LF_GP = np.concatenate((y_LF_GP, np.array(InvNorm2(np.mean(np.array(samples0),axis=0),y_HF_LFtrain)).reshape(1)))
# y_HF_GP = np.concatenate((y_HF_GP, np.array((LS1.Scalar_Borehole_HF_nD(inpp))).reshape(1)))
# y_GPtrain = np.concatenate((y_GPtrain, (np.array((LS1.Scalar_Borehole_HF_nD(inpp))-np.array(InvNorm2(np.mean(np.array(samples0),axis=0),y_HF_LFtrain)))).reshape(1)))
# inp_GPtrain = np.delete(inp_GPtrain, 0, 0)
# y_LF_GP = np.delete(y_LF_GP, 0)
# y_HF_GP = np.delete(y_HF_GP, 0)
# y_GPtrain = np.delete(y_GPtrain, 0)
# y_GPtrain = (y_HF_GP-y_LF_GP)
# ML = ML_TF(obs_ind = Norm1(inp_GPtrain,inp_GPtrain), obs = y_GPtrain, amp_init=1., len_init=1., var_init=1., num_iters = 1000)
# amp1, len1, var1 = ML.GP_train()
## Subset simulation with HF-LF and GP
# Uniform(0,1) sampler for the Metropolis accept/reject step below.
uni = uniform()
# Samples per subset level, retained fraction per level, number of levels.
Nsub = 250
Psub = 0.1
Nlim = 5
# Response samples per level and per-level thresholds; the final
# threshold is the target response level `value`.
y1 = np.zeros((Nsub,Nlim))
y1_lim = np.zeros(Nlim)
y1_lim[Nlim-1] = value
# Input samples, indexed (sample, dimension, level).
inp1 = np.zeros((Nsub,Ndim,Nlim))
rv = norm(loc=0,scale=1)
# U-function acceptance threshold per level.  NOTE(review): 9 entries but
# only indices 0..Nlim-1 (= 0..4) are read below — confirm intent.
u_lim_vec = np.array([2,2,2,2,2,2,2,2,2])
# Bookkeeping arrays, each seeded with one dummy leading entry.
u_GP = np.empty(1, dtype = float)
var_GP = np.empty(1, dtype = float)
var_GP[0] = var1.numpy().reshape(1)
subs_info = np.empty(1, dtype = float)
subs_info[0] = np.array(0).reshape(1)
LF_plus_GP = np.empty(1, dtype = float)
GP_pred = np.empty(1, dtype = float)
# Level 0: draw Nsub random inputs, score each with the bi-fidelity
# surrogate (LF GP posterior mean + discrepancy-GP correction), and fall
# back to the expensive HF model whenever the U-function check fails, in
# which case the new HF point is used to re-train the discrepancy GP.
# NOTE(review): the lines below read as the body of this `for` loop and
# of the if/else inside it, but leading indentation has been lost in this
# copy of the file — restore it against the original before executing.
for ii in np.arange(0,Nsub,1):
inp = DR1.BoreholeRandom()
inpp = inp[None,:]
samples0 = ML0.GP_predict(amplitude_var = amp0, length_scale_var=len0, observation_noise_variance_var=var0, pred_ind = Norm1(inpp,inp_LFtrain), num_samples=num_s)
LF = np.array(np.mean(InvNorm2(np.array(samples0),y_HF_LFtrain),axis=0)).reshape(1)
inp1[ii,:,0] = inp
samples1 = ML.GP_predict(amplitude_var = amp1, length_scale_var=len1, observation_noise_variance_var=var1, pred_ind = Norm1(inpp,inp_GPtrain), num_samples=num_s)
GP_diff = np.mean((np.array(samples1)),axis=0)
# U-function: |prediction| / predictive std — large means confident.
u_check = (np.abs(LF + GP_diff))/np.std(np.array(samples1),axis=0)
u_GP = np.concatenate((u_GP, u_check))
u_lim = u_lim_vec[0]
if u_check > u_lim:
y1[ii,0] = LF + GP_diff
else:
# Surrogate not trusted: evaluate HF model and enrich the training set.
y1[ii,0] = np.array((LS1.Scalar_Borehole_HF_nD(inpp))).reshape(1)
inp_GPtrain = np.concatenate((inp_GPtrain, inp.reshape(1,Ndim)))
y_LF_GP = np.concatenate((y_LF_GP, LF))
y_HF_GP = np.concatenate((y_HF_GP, y1[ii,0].reshape(1)))
y_GPtrain = np.concatenate((y_GPtrain, (y1[ii,0].reshape(1)-LF)))
LF_plus_GP = np.concatenate((LF_plus_GP, (LF + np.array(GP_diff).reshape(1))))
GP_pred = np.concatenate((GP_pred, (np.array(GP_diff).reshape(1))))
# ML = ML_TF(obs_ind = (np.array(inp_GPtrain))[:,:,0], obs = (np.array(y_HF_GP)[:,:,0]-np.array(y_LF_GP)[:,:,0])[:,0])
ML = ML_TF(obs_ind = Norm1(inp_GPtrain,inp_GPtrain), obs = y_GPtrain, amp_init=amp1, len_init=len1, var_init=var1, num_iters = Iters)
amp1, len1, var1 = ML.GP_train()
var_GP = np.concatenate((var_GP, var1.numpy().reshape(1)))
subs_info = np.concatenate((subs_info, np.array(0).reshape(1)))
# inpp = np.zeros(Ndim)
# Max chain length before re-seeding the MCMC chain from a stored seed.
count_max = int(Nsub/(Psub*Nsub))
# Levels 1..Nlim-1: keep the top Psub fraction of the previous level as
# seeds, then grow each level with component-wise Metropolis-Hastings,
# again scoring candidates with the surrogate and falling back to HF.
# NOTE(review): leading indentation has been lost in this copy of the
# file — the nesting (for kk / for ii / for jj / if-else) must be
# restored against the original before executing.
for kk in np.arange(1,Nlim,1):
count = np.inf
ind_max = 0
ind_sto = -1
# Seed this level with the largest Psub*Nsub responses of level kk-1.
y1[0:(int(Psub*Nsub)),kk] = np.sort(y1[:,kk-1])[int((1-Psub)*Nsub):(len(y1))]
y1_lim[kk-1] = np.min(y1[0:(int(Psub*Nsub)),kk])
indices = (-y1[:,kk-1]).argsort()[:(int(Psub*Nsub))]
inp1[0:(int(Psub*Nsub)),:,kk] = inp1[indices,:,kk-1]
for ii in np.arange((int(Psub*Nsub)),(Nsub),1):
nxt = np.zeros((1,Ndim))
if count > count_max:
# ind_max = random.randint(0,int(Psub*Nsub))
ind_sto = ind_sto + 1
ind_max = ind_sto
count = 0
else:
ind_max = ii-1
count = count + 1
# Component-wise MH proposal; first dimension uses a tighter proposal.
for jj in np.arange(0,Ndim,1):
if jj == 0:
rv1 = norm(loc=inp1[ind_max,jj,kk],scale=0.1)
else:
rv1 = norm(loc=inp1[ind_max,jj,kk],scale=1.0)
# rv1 = norm(loc=inp1[ind_max,jj,kk],scale=1.0)
prop = (rv1.rvs())
r = np.log(DR1.BoreholePDF(rv_req=prop, index=jj)) - np.log(DR1.BoreholePDF(rv_req=(inp1[ind_max,jj,kk]),index=jj)) # np.log(rv.pdf((prop)))-np.log(rv.pdf((inp1[ind_max,jj,kk])))
if r>np.log(uni.rvs()):
nxt[0,jj] = prop
else:
nxt[0,jj] = inp1[ind_max,jj,kk]
inpp[0,jj] = nxt[0,jj]
samples0 = ML0.GP_predict(amplitude_var = amp0, length_scale_var=len0, observation_noise_variance_var=var0, pred_ind = Norm1(inpp,inp_LFtrain), num_samples=num_s)
LF = np.array(np.mean(InvNorm2(np.array(samples0),y_HF_LFtrain),axis=0)).reshape(1)
samples1 = ML.GP_predict(amplitude_var = amp1, length_scale_var=len1, observation_noise_variance_var=var1, pred_ind = Norm1(inpp,inp_GPtrain), num_samples=num_s)
GP_diff = np.mean((np.array(samples1)),axis=0)
u_check = (np.abs(LF + GP_diff))/np.std(np.array(samples1),axis=0)
u_GP = np.concatenate((u_GP, u_check))
u_lim = u_lim_vec[kk]
if u_check > u_lim: # and ii > (int(Psub*Nsub)+num_retrain):
y_nxt = LF + GP_diff
else:
# y_nxt = np.array((LS1.Scalar_Borehole_HF_nD(inpp))).reshape(1)
# inp_GPtrain = np.concatenate((inp_GPtrain, inp.reshape(1,Ndim)))
# y_LF_GP = np.concatenate((y_LF_GP, LF))
# y_HF_GP = np.concatenate((y_HF_GP, y1[ii,0].reshape(1)))
# LF_plus_GP = np.concatenate((LF_plus_GP, (LF + np.array(GP_diff).reshape(1))))
# GP_pred = np.concatenate((GP_pred, (np.array(GP_diff).reshape(1))))
# ML = ML_TF(obs_ind = inp_GPtrain, obs = (y_HF_GP-y_LF_GP), amp_init=amp1, len_init=len1, var_init=var1, num_iters = Iters)
# amp1, len1, var1 = ML.GP_train()
# var_GP = np.concatenate((var_GP, var1.numpy().reshape(1)))
# subs_info = np.concatenate((subs_info, np.array(kk).reshape(1)))
# GP_diff = 0 ## Comment this
y_nxt = np.array((LS1.Scalar_Borehole_HF_nD(inpp))).reshape(1)
# NOTE(review): this appends `inp` (stale value from the level-0 loop),
# not the current candidate `inpp` — looks like a bug; confirm intent.
inp_GPtrain = np.concatenate((inp_GPtrain, inp.reshape(1,Ndim)))
y_LF_GP = np.concatenate((y_LF_GP, LF))
y_HF_GP = np.concatenate((y_HF_GP, y_nxt.reshape(1)))
y_GPtrain = np.concatenate((y_GPtrain, (y_nxt.reshape(1)-LF)))
LF_plus_GP = np.concatenate((LF_plus_GP, (LF + np.array(GP_diff).reshape(1))))
GP_pred = np.concatenate((GP_pred, (np.array(GP_diff).reshape(1))))
# ML = ML_TF(obs_ind = (np.array(inp_GPtrain))[:,:,0], obs = (np.array(y_HF_GP)[:,:,0]-np.array(y_LF_GP)[:,:,0])[:,0])
ML = ML_TF(obs_ind = Norm1(inp_GPtrain,inp_GPtrain), obs = y_GPtrain, amp_init=amp1, len_init=len1, var_init=var1, num_iters = Iters)
amp1, len1, var1 = ML.GP_train()
var_GP = np.concatenate((var_GP, var1.numpy().reshape(1)))
# NOTE(review): records level 0 here, whereas the commented variant above
# recorded `kk` — confirm which is intended.
subs_info = np.concatenate((subs_info, np.array(0).reshape(1)))
# Accept the move only if it stays above the previous level's threshold.
if (y_nxt)>y1_lim[kk-1]:
inp1[ii,:,kk] = inpp
y1[ii,kk] = y_nxt
else:
inp1[ii,:,kk] = inp1[ind_max,:,kk]
y1[ii,kk] = y1[ind_max,kk]
# for kk in np.arange(1,Nlim,1):
# count = np.inf
# ind_max = 0
# ind_sto = -1
# y1[0:(int(Psub*Nsub)),kk] = np.sort(y1[:,kk-1])[int((1-Psub)*Nsub):(len(y1))]
# y1_lim[kk-1] = np.min(y1[0:(int(Psub*Nsub)),kk])
# indices = (-y1[:,kk-1]).argsort()[:(int(Psub*Nsub))]
# inp1[0:(int(Psub*Nsub)),:,kk] = inp1[indices,:,kk-1]
# for ii in np.arange((int(Psub*Nsub)),(Nsub),1):
# nxt = np.zeros((1,Ndim))
# if count > count_max:
# # ind_max = random.randint(0,int(Psub*Nsub))
# ind_sto = ind_sto + 1
# ind_max = ind_sto
# count = 0
# else:
# ind_max = ii-1
# count = count + 1
# for jj in np.arange(0,Ndim,1):
# if jj == 0:
# rv1 = norm(loc=inp1[ind_max,jj,kk],scale=0.1)
# else:
# rv1 = norm(loc=inp1[ind_max,jj,kk],scale=1.0)
# prop = (rv1.rvs())
# r = np.log(DR1.BoreholePDF(rv_req=prop, index=jj)) - np.log(DR1.BoreholePDF(rv_req=(inp1[ind_max,jj,kk]),index=jj)) # rv.pdf((prop))/rv.pdf((inp1[ind_max,jj,kk]))
# if r>np.log(uni.rvs()):
# nxt[0,jj] = prop
# else:
# nxt[0,jj] = inp1[ind_max,jj,kk]
# inpp[0,jj] = nxt[0,jj]
# # inpp = inpp[None,:]
# # inpp = np.array([nxt[0,0], nxt[0,1], nxt[0,2]])[None,:]
# samples0 = ML0.GP_predict(amplitude_var = amp0, length_scale_var=len0, observation_noise_variance_var=var0, pred_ind = Norm1(inpp,inp_LFtrain), num_samples=num_s)
# LF = np.array(np.mean((np.array(samples0)),axis=0)).reshape(1)
# samples1 = ML.GP_predict(amplitude_var = amp1, length_scale_var=len1, observation_noise_variance_var=var1, pred_ind = Norm1(inpp,inp_GPtrain), num_samples=num_s)
# GP_diff = np.mean((np.array(samples1)),axis=0)
# u_check = (np.abs(LF + GP_diff))/np.std(np.array(samples1),axis=0)
# u_GP = np.concatenate((u_GP, u_check))
# u_lim = u_lim_vec[kk]
# if u_check > u_lim: # and ii > (int(Psub*Nsub)+num_retrain):
# y_nxt = LF # + GP_diff
# else:
# y_nxt = np.array((LS1.Scalar_Borehole_HF_nD(inpp))).reshape(1)
# inp_GPtrain = np.concatenate((inp_GPtrain, inp.reshape(1,Ndim)))
# y_LF_GP = np.concatenate((y_LF_GP, LF))
# y_HF_GP = np.concatenate((y_HF_GP, y_nxt.reshape(1))) # np.concatenate((y_HF_GP, y1[ii,0].reshape(1)))
# LF_plus_GP = np.concatenate((LF_plus_GP, (LF + np.array(GP_diff).reshape(1))))
# GP_pred = np.concatenate((GP_pred, (np.array(GP_diff).reshape(1))))
# ML = ML_TF(obs_ind = Norm1(inp_GPtrain,inp_GPtrain), obs = (y_HF_GP-y_LF_GP), amp_init=amp1, len_init=len1, var_init=var1, num_iters = Iters)
# amp1, len1, var1 = ML.GP_train()
# var_GP = np.concatenate((var_GP, var1.numpy().reshape(1)))
# subs_info = np.concatenate((subs_info, np.array(kk).reshape(1)))
# # GP_diff = 0 ## Comment this
# if (y_nxt)>y1_lim[kk-1]:
# inp1[ii,:,kk] = inpp
# y1[ii,kk] = y_nxt
# else:
# inp1[ii,:,kk] = inp1[ind_max,:,kk]
# y1[ii,kk] = y1[ind_max,kk]
# Failure probability = product of conditional level probabilities
# P(response > level-k threshold | previous level reached).
# NOTE(review): loop-body indentation has been lost in this copy.
Pf = 1
Pi_sto = np.zeros(Nlim)
for kk in np.arange(0,Nlim,1):
Pi = len(np.rot90(np.where(y1[:,kk]>np.min([y1_lim[kk],value]))))/(len(inp1[:,0,0]))
Pi_sto[kk] = Pi
Pf = Pf * Pi
| 44.919094
| 190
| 0.62255
| 2,329
| 13,880
| 3.495492
| 0.089309
| 0.040413
| 0.012898
| 0.016214
| 0.791549
| 0.768947
| 0.748802
| 0.741309
| 0.711706
| 0.711706
| 0
| 0.043095
| 0.207565
| 13,880
| 308
| 191
| 45.064935
| 0.697063
| 0.448487
| 0
| 0.257862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018868
| false
| 0
| 0.144654
| 0.018868
| 0.18239
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0f6b1f79671c75fcecfcf78744477c592bd72c1d
| 251
|
py
|
Python
|
winton_kafka_streams/state/__init__.py
|
wintoncode/winton-kafka-streams
|
5867a1c42fc80bba07173fd1d004b2849b429fdf
|
[
"Apache-2.0"
] | 330
|
2017-07-12T09:05:43.000Z
|
2022-03-14T06:44:59.000Z
|
winton_kafka_streams/state/__init__.py
|
sribarrow/winton-kafka-streams
|
5867a1c42fc80bba07173fd1d004b2849b429fdf
|
[
"Apache-2.0"
] | 39
|
2017-07-13T10:36:07.000Z
|
2021-06-14T06:28:38.000Z
|
winton_kafka_streams/state/__init__.py
|
sribarrow/winton-kafka-streams
|
5867a1c42fc80bba07173fd1d004b2849b429fdf
|
[
"Apache-2.0"
] | 71
|
2017-07-12T10:51:55.000Z
|
2021-12-28T08:57:10.000Z
|
from winton_kafka_streams.state.factory.store_factory import StoreFactory
def create(name: str) -> StoreFactory:
    """Return a StoreFactory configured for the state store *name*.

    TODO: replace this Java-esque factory with a Pythonic DSL as part of
    the other work on a Streams DSL.
    """
    factory = StoreFactory(name)
    return factory
| 35.857143
| 105
| 0.784861
| 38
| 251
| 5.105263
| 0.789474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163347
| 251
| 6
| 106
| 41.833333
| 0.92381
| 0.394422
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
7e31512d37bbc8a8de3b6acd12c76f55c05dc729
| 38
|
py
|
Python
|
examples/algorithms/__init__.py
|
shenao-zhang/DCPU
|
0da9aa2b7878b54ba4ee4dca894c2e86cdc0d559
|
[
"MIT"
] | 8
|
2020-10-23T07:52:19.000Z
|
2022-03-06T13:35:12.000Z
|
examples/algorithms/__init__.py
|
shenao-zhang/DCPU
|
0da9aa2b7878b54ba4ee4dca894c2e86cdc0d559
|
[
"MIT"
] | 3
|
2021-03-04T13:44:01.000Z
|
2021-03-23T09:57:50.000Z
|
examples/algorithms/__init__.py
|
shenao-zhang/DCPU
|
0da9aa2b7878b54ba4ee4dca894c2e86cdc0d559
|
[
"MIT"
] | 3
|
2021-03-18T08:23:56.000Z
|
2021-07-06T11:20:12.000Z
|
"""Working examples of algorithms."""
| 19
| 37
| 0.710526
| 4
| 38
| 6.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.794118
| 0.815789
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7e8eb50a7aac80ea7ec7b8647cf8e45ccb30a092
| 163
|
py
|
Python
|
example_package/some_module.py
|
jerabaul29/example_python_package
|
4c9a47709e0317eaa00e5d78815da9568cbe51d0
|
[
"MIT"
] | null | null | null |
example_package/some_module.py
|
jerabaul29/example_python_package
|
4c9a47709e0317eaa00e5d78815da9568cbe51d0
|
[
"MIT"
] | 2
|
2021-05-05T20:51:44.000Z
|
2021-05-09T20:11:07.000Z
|
example_package/some_module.py
|
jerabaul29/example_python_package
|
4c9a47709e0317eaa00e5d78815da9568cbe51d0
|
[
"MIT"
] | 1
|
2021-02-01T08:37:28.000Z
|
2021-02-01T08:37:28.000Z
|
import tqdm
def some_module_hello():
    """Print a greeting identifying this module."""
    greeting = "hello from some_module"
    print(greeting)
def some_module_42():
    """Print 0..4 behind a tqdm progress bar, then return 42."""
    for step in tqdm.tqdm(range(5)):
        print(step)
    return 42
| 14.818182
| 35
| 0.650307
| 26
| 163
| 3.884615
| 0.576923
| 0.29703
| 0.257426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040323
| 0.239264
| 163
| 10
| 36
| 16.3
| 0.774194
| 0
| 0
| 0
| 0
| 0
| 0.135802
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.142857
| 0
| 0.571429
| 0.285714
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
7e979f95c0114ff466a76dc2ffaad4422d12e0cb
| 298
|
py
|
Python
|
packer/test_packer.py
|
felipefrocha/automate-k8s-aws
|
a39221cccd1d4c701782751e4620dc9d3ac62e82
|
[
"Apache-2.0"
] | 4
|
2020-06-04T10:49:51.000Z
|
2021-02-09T17:40:51.000Z
|
packer/test_packer.py
|
felipefrocha/automate-k8s-aws
|
a39221cccd1d4c701782751e4620dc9d3ac62e82
|
[
"Apache-2.0"
] | null | null | null |
packer/test_packer.py
|
felipefrocha/automate-k8s-aws
|
a39221cccd1d4c701782751e4620dc9d3ac62e82
|
[
"Apache-2.0"
] | null | null | null |
def test_passwd_file(host):
passwd = host.file("/etc/passwd")
assert passwd.contains("root")
assert passwd.user == "root"
assert passwd.group == "root"
assert passwd.mode == 0o644
def test_nginx_config_file(host):
nginx = host.run("sudo nginx -t")
assert nginx.succeedd
| 29.8
| 37
| 0.677852
| 41
| 298
| 4.804878
| 0.463415
| 0.243655
| 0.243655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016529
| 0.187919
| 298
| 10
| 38
| 29.8
| 0.797521
| 0
| 0
| 0
| 0
| 0
| 0.120401
| 0
| 0
| 0
| 0
| 0
| 0.555556
| 1
| 0.222222
| false
| 0.666667
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
7ea75b31bbe15633a2ff1d1cf16e0d55279ded4b
| 3,433
|
py
|
Python
|
fit/transformers/SResTransformer.py
|
ashesh-0/FourierImageTransformer
|
2b75263cacbc1581d27fefa0ac7167f5791df99d
|
[
"BSD-3-Clause"
] | 69
|
2021-04-07T06:33:20.000Z
|
2022-03-06T16:24:16.000Z
|
fit/transformers/SResTransformer.py
|
ashesh-0/FourierImageTransformer
|
2b75263cacbc1581d27fefa0ac7167f5791df99d
|
[
"BSD-3-Clause"
] | 1
|
2021-08-24T22:53:11.000Z
|
2021-11-04T16:28:09.000Z
|
fit/transformers/SResTransformer.py
|
ashesh-0/FourierImageTransformer
|
2b75263cacbc1581d27fefa0ac7167f5791df99d
|
[
"BSD-3-Clause"
] | 9
|
2021-04-08T01:55:44.000Z
|
2022-03-07T13:57:13.000Z
|
import torch
from fast_transformers.builders import TransformerEncoderBuilder, RecurrentEncoderBuilder
from fast_transformers.masking import TriangularCausalMask
from fit.transformers.PositionalEncoding2D import PositionalEncoding2D
class SResTransformerTrain(torch.nn.Module):
    """Causally-masked transformer encoder for super-resolution training.

    Embeds 2-component Fourier coefficients into half the model width,
    adds a 2D positional encoding for the other half, runs a triangular
    (causal) masked encoder, and predicts an (amplitude, phase) pair per
    sequence position.
    """

    def __init__(self,
                 d_model,
                 coords, flatten_order,
                 attention_type="linear",
                 n_layers=4,
                 n_heads=4,
                 d_query=32,
                 dropout=0.1,
                 attention_dropout=0.1):
        super(SResTransformerTrain, self).__init__()
        # (real, imag) pair -> d_model // 2; positional encoding fills the rest.
        self.fourier_coefficient_embedding = torch.nn.Linear(2, d_model // 2)
        self.pos_embedding = PositionalEncoding2D(
            d_model // 2,
            coords=coords,
            flatten_order=flatten_order,
            persistent=False,
        )
        self.encoder = TransformerEncoderBuilder.from_kwargs(
            attention_type=attention_type,
            n_layers=n_layers,
            n_heads=n_heads,
            feed_forward_dimensions=n_heads * d_query * 4,
            query_dimensions=d_query,
            value_dimensions=d_query,
            dropout=dropout,
            attention_dropout=attention_dropout,
        ).get()
        # Separate linear heads for amplitude and (tanh-squashed) phase.
        self.predictor_amp = torch.nn.Linear(n_heads * d_query, 1)
        self.predictor_phase = torch.nn.Linear(n_heads * d_query, 1)

    def forward(self, x):
        """Return per-position (amplitude, phase) predictions for *x*."""
        embedded = self.fourier_coefficient_embedding(x)
        embedded = self.pos_embedding(embedded)
        causal_mask = TriangularCausalMask(embedded.shape[1], device=embedded.device)
        hidden = self.encoder(embedded, attn_mask=causal_mask)
        amp = self.predictor_amp(hidden)
        phase = torch.tanh(self.predictor_phase(hidden))
        return torch.cat([amp, phase], dim=-1)
class SResTransformerPredict(torch.nn.Module):
    """Recurrent (one-position-per-call) variant of SResTransformerTrain.

    Uses a recurrent encoder that carries state in `memory` instead of
    attending over the full sequence, for autoregressive inference.
    """

    def __init__(self, d_model, coords, flatten_order,
                 attention_type="full", n_layers=4, n_heads=4,
                 d_query=32, dropout=0.1,
                 attention_dropout=0.1):
        super(SResTransformerPredict, self).__init__()
        # (real, imag) pair -> d_model // 2; positional encoding fills the rest.
        self.fourier_coefficient_embedding = torch.nn.Linear(2, d_model // 2)
        self.pos_embedding = PositionalEncoding2D(
            d_model // 2,
            coords=coords,
            flatten_order=flatten_order,
            persistent=False,
        )
        self.encoder = RecurrentEncoderBuilder.from_kwargs(
            attention_type=attention_type,
            n_layers=n_layers,
            n_heads=n_heads,
            feed_forward_dimensions=n_heads * d_query * 4,
            query_dimensions=d_query,
            value_dimensions=d_query,
            dropout=dropout,
            attention_dropout=attention_dropout,
        ).get()
        # Separate linear heads for amplitude and (tanh-squashed) phase.
        self.predictor_amp = torch.nn.Linear(n_heads * d_query, 1)
        self.predictor_phase = torch.nn.Linear(n_heads * d_query, 1)

    def forward(self, x, i=0, memory=None):
        """Predict (amplitude, phase) for step *i*; returns (pred, memory)."""
        flat = x.view(x.shape[0], -1)
        flat = self.fourier_coefficient_embedding(flat)
        flat = self.pos_embedding.forward_i(flat, i)
        hidden, memory = self.encoder(flat, memory)
        amp = self.predictor_amp(hidden)
        phase = torch.tanh(self.predictor_phase(hidden))
        return torch.cat([amp, phase], dim=-1), memory
| 33.009615
| 89
| 0.600641
| 389
| 3,433
| 4.992288
| 0.18509
| 0.037075
| 0.040165
| 0.037075
| 0.729145
| 0.729145
| 0.729145
| 0.729145
| 0.729145
| 0.729145
| 0
| 0.016115
| 0.313137
| 3,433
| 103
| 90
| 33.330097
| 0.807464
| 0
| 0
| 0.528736
| 0
| 0
| 0.002913
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045977
| false
| 0
| 0.045977
| 0
| 0.137931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0e2b8cd7dc6c5cea61e5149a6b298654f85ef89f
| 36
|
py
|
Python
|
tests/app/__init__.py
|
vralex/RumbleRunner
|
eb9889daf90846176af292d4e7411c41dac885c8
|
[
"MIT"
] | 2
|
2022-01-26T15:06:02.000Z
|
2022-02-03T05:14:52.000Z
|
tests/app/__init__.py
|
vralex/RumbleRunner
|
eb9889daf90846176af292d4e7411c41dac885c8
|
[
"MIT"
] | 1
|
2022-02-07T23:50:26.000Z
|
2022-02-07T23:50:26.000Z
|
tests/app/__init__.py
|
vralex/RumbleRunner
|
eb9889daf90846176af292d4e7411c41dac885c8
|
[
"MIT"
] | 1
|
2022-02-07T23:19:16.000Z
|
2022-02-07T23:19:16.000Z
|
# Place tests for custom logic here
| 18
| 35
| 0.777778
| 6
| 36
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.194444
| 36
| 1
| 36
| 36
| 0.965517
| 0.916667
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0e2bf5b9ea106a7c92af80e9412ff509f3bc82df
| 937
|
py
|
Python
|
codewars/8kyu/mohamedashrafamin/Grasshopper - Summation/main.py
|
mohamedashrafamin/Training_one
|
11748fdde85cdc9083e2b0bde7519b51a7acfa62
|
[
"MIT"
] | null | null | null |
codewars/8kyu/mohamedashrafamin/Grasshopper - Summation/main.py
|
mohamedashrafamin/Training_one
|
11748fdde85cdc9083e2b0bde7519b51a7acfa62
|
[
"MIT"
] | 2
|
2019-01-22T10:53:42.000Z
|
2019-01-31T08:02:48.000Z
|
codewars/8kyu/mohamedashrafamin/Grasshopper - Summation/main.py
|
mohamedashrafamin/Training_one
|
11748fdde85cdc9083e2b0bde7519b51a7acfa62
|
[
"MIT"
] | 13
|
2019-01-22T10:37:42.000Z
|
2019-01-25T13:30:43.000Z
|
def summation(num):
    """Return the sum 0 + 1 + ... + num.

    Fix: the original recursive version exceeded Python's recursion limit
    for large `num` and recursed until RecursionError for negative `num`.
    The closed-form Gauss formula handles any non-negative size in O(1).

    Raises:
        ValueError: if `num` is negative.
    """
    if num < 0:
        raise ValueError("num must be non-negative")
    return num * (num + 1) // 2
def summation1(num):
    """Return the sum of all integers from 0 through num (inclusive)."""
    total = 0
    for term in range(num + 1):
        total += term
    return total
# Name (time in ns) Min Max Mean StdDev Median IQR Outliers OPS (Mops/s) Rounds Iterations
# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# test1 159.7404 (1.0) 739.0976 (1.0) 185.8774 (1.0) 42.3062 (1.0) 178.8139 (1.0) 9.5367 (1.0) 2478;2804 5.3799 (1.0) 49933 100
# test 228.8818 (1.43) 1,029.9683 (1.39) 252.6062 (1.36) 53.0119 (1.25) 240.8028 (1.35) 19.0735 (2.00) 2253;2300 3.9587 (0.74) 39946 100
| 66.928571
| 197
| 0.340448
| 104
| 937
| 3.067308
| 0.682692
| 0.043887
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.271667
| 0.359658
| 937
| 13
| 198
| 72.076923
| 0.26
| 0.835646
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
0e30478c4f2cdb7bc3559da97b55288e549982b5
| 15,705
|
py
|
Python
|
buglabs-scripts/bugapps2recipes.py
|
buglabs/oe-buglabs
|
b8a4c4b1358214cd3ac1cf6f85154e9c62b16ce7
|
[
"MIT"
] | 1
|
2017-01-24T09:08:56.000Z
|
2017-01-24T09:08:56.000Z
|
buglabs-scripts/bugapps2recipes.py
|
buglabs/oe-buglabs
|
b8a4c4b1358214cd3ac1cf6f85154e9c62b16ce7
|
[
"MIT"
] | null | null | null |
buglabs-scripts/bugapps2recipes.py
|
buglabs/oe-buglabs
|
b8a4c4b1358214cd3ac1cf6f85154e9c62b16ce7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python2.6
#
# Script to generate recipes for BugNet applications
#
# Copyright 2010 Bug Labs Inc.
# Written by Marcin Juszkiewicz <marcin@buglabs.net>
#
# License: GPLv2
#
# To get list of all bugnet apps: http://api.buglabs.net/export?count=300
#
import io
from xml.etree.ElementTree import ElementTree
# NOTE(review): `io` appears unused in this script -- confirm before removing.
# Load the BugNet application export (fetched from the export URL above)
# into an ElementTree; `root` is the document's root element, whose children
# are iterated as application entries in the main loop below.
xmldata = ElementTree()
xmldata.parse("bugapps.xml")
root = xmldata.getroot()
# dict of Java class names to OpenEmbedded recipe names.
# some of entries maps to applications from bugnet
# Maps Java package/class names (as they appear in the BugNet export's
# import_packages / export_packages fields) to OpenEmbedded recipe names.
# Some entries map to applications from bugnet.  The original literal
# contained duplicate keys -- Python silently keeps only the LAST value for
# a duplicated key, so the surviving values are retained here and the dead
# earlier entries are removed.
depsdict = {
    # dependency                              recipe name
    'audioapitester.pub': 'com.buglabs.app.audiotestcase',
    'bugdiscover.pub': 'com.buglabs.app.bugdiscover',
    'bug.event': 'com.buglabs.app.buttoneventadapter',
    'ch.ethz.iks.r_osgi.channels': 'com.buglabs.app.remoteosgi',
    'ch.ethz.iks.r_osgi.messages': 'com.buglabs.app.remoteosgi',
    'ch.ethz.iks.r_osgi.service_discovery': 'com.buglabs.app.remoteosgi',
    'ch.ethz.iks.r_osgi.types': 'com.buglabs.app.remoteosgi',
    'ch.ethz.iks.r_osgi': 'com.buglabs.app.remoteosgi',
    'ch.ethz.iks.slp': 'ch.ethz.iks.slp',
    # fixed: value previously contained a stray space ('...remote osgi'),
    # which would have injected two bogus DEPENDS tokens
    'ch.ethz.iks.util': 'com.buglabs.app.remoteosgi',
    # duplicate key removed: the later 'shakemeasureservice' value was the
    # one Python actually kept
    'com.bug.accelerometer.util.pub': 'com.buglabs.app.shakemeasureservice',
    'com.buglabs.application': 'com.buglabs.common',
    'com.buglabs.bug.jni.accelerometer': 'com.buglabs.bug.jni.accelerometer',
    'com.buglabs.bug.accelerometer.pub': 'com.buglabs.bug.jni.accelerometer com.buglabs.bug.module.motion',
    'com.buglabs.bug.accelerometer': 'com.buglabs.bug.accelerometer',
    'com.buglabs.bug.base.pub': 'com.buglabs.bug.base',
    'com.buglabs.bug.bmi.pub': 'com.buglabs.bug.bmi',
    'com.buglabs.bug.event': 'com.buglabs.bug.event',
    'com.buglabs.bug.jni.audio': 'com.buglabs.bug.jni.audio',
    'com.buglabs.bug.jni.basedisplay': 'com.buglabs.bug.jni.basedisplay',
    'com.buglabs.bug.jni.base': 'com.buglabs.bug.jni.basedisplay',
    'com.buglabs.bug.jni.bluetooth': 'com.buglabs.bug.jni.bluetooth',
    'com.buglabs.bug.jni.bugbeep': 'com.buglabs.bug.jni.bugbeep',
    'com.buglabs.bug.jni.bugbee': 'com.buglabs.bug.jni.bugbee',
    'com.buglabs.bug.jni.camera': 'com.buglabs.bug.jni.camera',
    'com.buglabs.bug.jni.common': 'com.buglabs.bug.jni.common',
    'com.buglabs.bug.jni.gps': 'com.buglabs.bug.jni.gps',
    'com.buglabs.bug.jni.gsm': 'com.buglabs.bug.jni.gsm',
    'com.buglabs.bug.jni.input': 'com.buglabs.bug.jni.input',
    'com.buglabs.bug.jni.input.pub': 'com.buglabs.bug.jni.input',
    'com.buglabs.bug.jni.lcd': 'com.buglabs.bug.jni.lcd',
    'com.buglabs.bug.jni.motion': 'com.buglabs.bug.jni.motion',
    'com.buglabs.bug.jni.pb': 'com.buglabs.bug.jni.pb',
    'com.buglabs.bug.jni.sensor': 'com.buglabs.bug.jni.sensor',
    'com.buglabs.bug.jni.vonhippel': 'com.buglabs.bug.jni.vonhippel',
    'com.buglabs.bug.jni.xrandr': 'com.buglabs.bug.jni.xrandr',
    'com.buglabs.bug.menu.pub': 'com.buglabs.bug.menu',
    'com.buglabs.bug.module.pub': 'com.buglabs.bug.module',
    'com.buglabs.bug.module.audio.pub': 'com.buglabs.bug.module.audio com.buglabs.bug.audio.common',
    'com.buglabs.bug.module.bugbee.pub': 'com.buglabs.bug.module.bugbee',
    'com.buglabs.bug.module.camera.pub': 'com.buglabs.bug.module.camera',
    'com.buglabs.bug.module.gps.pub': 'com.buglabs.bug.module.gps',
    'com.buglabs.bug.module.gsm.pub': 'com.buglabs.bug.module.gsm',
    'com.buglabs.bug.module.lcd.pub': 'com.buglabs.bug.module.lcd',
    'com.buglabs.bug.module.lcd.swt.pub': 'com.buglabs.app.swtdisplayprovider',
    'com.buglabs.bug.module.motion.pub': 'com.buglabs.bug.module.motion',
    'com.buglabs.bug.module.sensor.pub': 'com.buglabs.bug.module.sensor',
    'com.buglabs.bug.module.vonhippel.pub': 'com.buglabs.bug.module.vonhippel',
    'com.buglabs.bug.program.pub': 'com.buglabs.bug.program',
    'com.buglabs.bug.service': 'com.buglabs.bug.service',
    'com.buglabs.common.regexp.pub': 'com.buglabs.app.com.buglabs.common.regexp',
    'com.buglabs.device': 'com.buglabs.common',
    'com.buglabs.m2mxml.datatype': 'com.buglabs.app.bugm2mxml',
    'com.buglabs.m2mxml.exception': 'com.buglabs.app.bugm2mxml',
    'com.buglabs.m2mxml.messages.commands': 'com.buglabs.app.bugm2mxml',
    'com.buglabs.m2mxml.messages.percepts': 'com.buglabs.app.bugm2mxml',
    'com.buglabs.m2mxml': 'com.buglabs.app.bugm2mxml',
    'com.buglabs.menu': 'com.buglabs.bug.menu',
    'com.buglabs.module': 'com.buglabs.common',
    'com.buglabs.nmea2': 'com.buglabs.nmea',
    'com.buglabs.nmea.sentences': 'com.buglabs.nmea',
    'com.buglabs.nmea': 'com.buglabs.nmea',
    'com.buglabs.osgi.cm': 'com.buglabs.osgi.cm',
    'com.buglabs.osgi.http.pub': 'com.buglabs.osgi.http',
    'com.buglabs.osgi.http': 'com.buglabs.osgi.http',
    'com.buglabs.osgi.sewing.pub.util': 'com.buglabs.osgi.sewing com.sun.javax.servlet',
    'com.buglabs.osgi.sewing.pub': 'com.buglabs.osgi.sewing com.sun.javax.servlet',
    'com.buglabs.osgi.shell.pub': 'com.buglabs.osgi.shell',
    'com.buglabs.osgi.shell': 'com.buglabs.common',
    'com.buglabs.services.ws': 'com.buglabs.common',
    'com.buglabs.status': 'com.buglabs.common',
    'com.buglabs.support': 'com.buglabs.common',
    'com.buglabs.tableviewer': 'com.buglabs.osgi.shell',
    'com.buglabs.util.simplerestclient': 'com.buglabs.common',
    'com.buglabs.util.trackers': 'com.buglabs.common',
    'com.buglabs.util': 'com.buglabs.common',
    'com.google.zxing.common': 'com.buglabs.app.zxing4bug',
    'com.google.zxing': 'com.buglabs.app.zxing4bug',
    'continuousmotorcontroller.pub': 'com.buglabs.app.continuousmotorcontroller',
    'de.avetana.bluetooth.connection': 'com.buglabs.bug.jni.bluetooth',
    'de.avetana.bluetooth.hci': 'com.buglabs.bug.jni.bluetooth',
    'de.avetana.bluetooth.l2cap': 'com.buglabs.bug.jni.bluetooth',
    'de.avetana.bluetooth.obex': 'com.buglabs.bug.jni.bluetooth',
    'de.avetana.bluetooth.rfcomm': 'com.buglabs.bug.jni.bluetooth',
    'de.avetana.bluetooth.sdp': 'com.buglabs.bug.jni.bluetooth',
    'de.avetana.bluetooth.stack': 'com.buglabs.bug.jni.bluetooth',
    'de.avetana.bluetooth.util': 'com.buglabs.bug.jni.bluetooth',
    'demonotificationserver.pub': 'com.buglabs.app.demonotificationserver',
    'edu.oswego.cs.dl.util.concurrent': 'edu.oswego.cs.dl.util.concurrent',
    'freemarker.template': 'com.buglabs.osgi.sewing com.sun.javax.servlet',
    'gpsutilities.pub': 'com.buglabs.app.gpsutilities',
    'javax.bluetooth': 'com.buglabs.bug.jni.bluetooth',
    'javax.obex': 'com.buglabs.bug.jni.bluetooth',
    'javax.servlet.http': 'com.sun.javax.servlet',
    'javax.servlet.jsp': 'com.sun.javax.servlet',
    'javax.servlet.resources': 'com.sun.javax.servlet',
    'javax.servlet': 'com.sun.javax.servlet',
    'junit.framework': 'com.buglabs.app.blueback',
    'latlonconverter.utils': 'com.buglabs.app.latlonconverter',
    'latlonconverter': 'com.buglabs.app.latlonconverter',
    'menusbtestcase.pub': 'com.buglabs.app.menusbtestcase',
    'net.contentobjects.jnotify': 'net.contentobjects.jnotify',
    'org.eclipse.spaces.xdrive.handlers': 'com.buglabs.app.org.eclipse.spaces.xdrive',
    'org.eclipse.spaces.xdrive.http': 'com.buglabs.app.org.eclipse.spaces.xdrive',
    'org.eclipse.spaces.xdrive.spi': 'com.buglabs.app.org.eclipse.spaces.xdrive',
    'org.eclipse.spaces.xdrive.tos': 'com.buglabs.app.org.eclipse.spaces.xdrive',
    'org.eclipse.spaces.xdrive': 'com.buglabs.app.org.eclipse.spaces.xdrive',
    # duplicate key removed: the later 'com.buglabs.app.libswt' value won
    'org.eclipse.swt.accessibility': 'com.buglabs.app.libswt',
    'org.eclipse.swt.browser': 'com.buglabs.app.libswt',
    'org.eclipse.swt.custom': 'com.buglabs.app.libswt',
    'org.eclipse.swt.dnd': 'com.buglabs.app.libswt',
    'org.eclipse.swt.events': 'com.buglabs.app.libswt',
    'org.eclipse.swt.graphics': 'com.buglabs.app.libswt',
    'org.eclipse.swt.layout': 'com.buglabs.app.libswt',
    'org.eclipse.swt.opengl': 'com.buglabs.app.libswt',
    'org.eclipse.swt.printing': 'com.buglabs.app.libswt',
    'org.eclipse.swt.program': 'com.buglabs.app.libswt',
    'org.eclipse.swt.widgets': 'com.buglabs.app.libswt',
    'org.eclipse.swt': 'com.buglabs.app.libswt',
    'org.osgi.framework': 'com.buglabs.osgi',
    # duplicate key removed: the later 'com.buglabs.osgi.cm' value won
    'org.osgi.service.cm': 'com.buglabs.osgi.cm',
    'org.osgi.service.device': 'com.buglabs.osgi',
    'org.osgi.service.event': 'com.buglabs.app.eventadmin',
    # duplicate key removed: the later 'com.buglabs.osgi.http' value won
    'org.osgi.service.http': 'com.buglabs.osgi.http',
    'org.osgi.service.http.pub': 'com.buglabs.osgi.http',
    'org.osgi.service.io': 'com.buglabs.osgi',
    'org.osgi.service.jini': 'com.buglabs.osgi',
    'org.osgi.service.log': 'com.buglabs.osgi',
    'org.osgi.service.obr': 'com.buglabs.osgi.obr',
    'org.osgi.service.metatype': 'com.buglabs.osgi',
    'org.osgi.service.packageadmin': 'com.buglabs.osgi',
    'org.osgi.service.permissionadmin': 'com.buglabs.osgi',
    'org.osgi.service.prefs': 'com.buglabs.osgi',
    'org.osgi.service.provisioning': 'com.buglabs.osgi',
    'org.osgi.service.startlevel': 'com.buglabs.osgi',
    'org.osgi.service.upnp': 'com.buglabs.osgi',
    'org.osgi.service.url': 'com.buglabs.osgi',
    'org.osgi.service.useradmin': 'com.buglabs.osgi',
    'org.osgi.service.wireadmin': 'com.buglabs.osgi',
    'org.osgi.util.measurement': 'com.buglabs.bug.module.gps',
    'org.osgi.util.position': 'com.buglabs.bug.module.gps',
    'org.osgi.util.tracker': 'service-tracker',
    'org.osgi.util.xml': 'com.buglabs.osgi',
    'org.thenesis.midpath.sound.backend.alsa': 'com.buglabs.bug.jni.audio',
    'org.thenesis.midpath.sound': 'com.buglabs.bug.audio.common',
    'org.thenesis.midpath.sound.codec': 'com.buglabs.bug.audio.common',
    'com.jcraft.jogg': 'com.buglabs.bug.audio.common',
    'com.jcraft.jorbis': 'com.buglabs.bug.audio.common',
    'pmea_image_utils': 'com.buglabs.app.basicpmeaimageutils',
    'publicwsadminextender': 'com.buglabs.app.publicwsadminextender',
    'serviceproducer.pub': 'com.buglabs.app.serviceproducer',
    'shell.pub': 'com.buglabs.app.shellservice',
    #'shell.pub': 'com.buglabs.app.bugdash',
    'simplebatterymanager.pub': 'com.buglabs.app.simplebatterymanager',
    'simplerestclient': 'com.buglabs.app.simplerestclient',
    #'simplerestclient': 'com.buglabs.app.bugdash',
    'webconfig666': 'com.buglabs.app.webconfig666',
}
# list of known-to-be-broken bugapps
# (names are in normalized form -- lowercased with spaces/underscores removed,
# matching the 'title' normalization in the main loop below; matching apps
# get BROKEN = "1" in their generated recipe).
# NOTE(review): the main loop uses brokenapps.index(), whose return of 0 for
# the first element is falsy, so the leading '' entry deliberately(?) occupies
# index 0 -- do not reorder this list without checking that logic.
brokenapps = [ '', 'accelerometervisualizer', 'aimmotionnotifier',
               'audiomodulebuttontester', 'babycamera', 'buggraph',
               'bugmailsample', 'drawpad', 'fifteen', 'flickruppr2',
               'flickruppr', 'flyovercamera', 'geriatricassistant',
               'gpslogger1.1', 'gpslogger', 'gpsloggersimplegui',
               'gpsrawfeedexample', 'ircbotexample', 'jythongps',
               'log4jexample', 'motherbugtweetntwitch', 'motorcontrolws',
               'networkedbugapp', 'remotecamera', 'remoteservicelogger',
               'serialinputdisplay', 'simplelwuitsample',
               'swtdisplayprovider', 'twitterbug', 'vhapitester']
# Main loop: walk every application entry in the export and write one
# BitBake recipe per app into out/.  (Python 2 script -- print statements.)
for bugapp in root.getchildren():
    # needed for generating depsdict
    bugappname = ''
    broken = 0
    for element in bugapp.getchildren():
        if element.tag == 'title':
            # Normalize: lowercase with spaces and underscores stripped; this
            # form is used for the recipe filename and brokenapps matching.
            bugappname = element.text.lower().replace(' ', '').replace('_', '')
            recipefilename = 'com.buglabs.app.' + bugappname + '.bb'
            description = element.text
            try:
                # NOTE(review): index() returns 0 (falsy) for the first list
                # entry, so a match at index 0 would NOT set broken; a plain
                # `bugappname in brokenapps` membership test looks intended --
                # confirm before changing.  A non-member raises ValueError,
                # swallowed below.
                if brokenapps.index(bugappname):
                    broken = 1
            except:
                pass
        elif element.tag == 'homepage':
            homepage = element.get('url')
        elif element.tag == 'description':
            if element.text:
                # we do not want empty lines, all lines needs to end with \, s/"/' to make BitBake parser happy, kill non-ascii chars
                description = element.text.replace("\n\n", "\n").replace("\n", "\\\n").replace('"', "'").encode('ascii', 'ignore')
        elif element.tag == 'program_version':
            # probably should go to recipefilename
            pv = element.text
        elif element.tag == 'download':
            source = element.get('url')
        elif element.tag == 'import_packages':
            # few apps lacks dependencies information - akweon works on them (old uploads)
            deps = ''
            if element.text:
                deps = element.text
        elif element.tag == 'api_version':
            # this field is filled by user so just 15/170 bugapps had it filled
            # API = 1.4.x is kind of warranty that it builds
            api_version = ''
            if element.text:
                api_version = element.text
        elif element.tag == 'export_packages':
            # some bugapps export Java classes for other apps - we need to have it in depsdict
            if element.text:
                for entry in element.text.split(', '):
                    try:
                        if not depsdict[entry]:
                            print "'%s':\t\t'com.buglabs.app.%s'," % (entry, bugappname)
                    except:
                        # entry is not in our dictionary - print it and add into this run
                        print "'%s':\t\t'com.buglabs.app.%s'," % (entry, bugappname)
                        depsdict[entry] = bugappname
    # Translate the raw import_packages names into recipe names; unknown
    # dependencies are reported on stdout and dropped.
    newdeps = []
    if deps:
        for dep in deps.split(', '):
            try:
                newdeps.append(depsdict[dep.replace(' ','')] + ' ')
            except:
                # we got dependency which is not in depsdict
                print "EXC:'%s'" % dep
        # De-duplicate and sort so DEPENDS is stable across runs.
        deps = ''.join(sorted(set(newdeps)))
    # output recipe
    # NOTE(review): variables like homepage/pv/source carry over from the
    # previous app if a tag is missing from this entry -- presumably every
    # export record has all tags; verify against the export format.
    file = open("out/" + recipefilename, 'w')
    file.write("require bug-app.inc\n")
    file.write("\n")
    file.write("DESCRIPTION = \"%s\"\n" % description)
    file.write("HOMEPAGE = \"%s\"\n" % homepage)
    file.write("\n")
    if deps:
        file.write("DEPENDS += \"%s\"\n" % deps)
        file.write("\n")
    file.write("PV = \"%s\"\n" % pv)
    file.write("\n")
    file.write("SRC_LINK = \"%s\"\n" % source)
    file.write("\n")
    file.write("APIVERSION = \"%s\"\n" % api_version)
    if broken:
        file.write("\n")
        file.write("BROKEN = \"1\"")
    file.write("\n")
    file.close()
| 55.4947
| 133
| 0.606367
| 1,846
| 15,705
| 5.149512
| 0.196641
| 0.236693
| 0.132653
| 0.087524
| 0.559752
| 0.451399
| 0.334105
| 0.25121
| 0.170945
| 0.079213
| 0
| 0.003625
| 0.227189
| 15,705
| 282
| 134
| 55.691489
| 0.7796
| 0.072206
| 0
| 0.102459
| 1
| 0
| 0.600316
| 0.457852
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.004098
| 0.012295
| null | null | 0.016393
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0e54220cda2c6f180a3a4cbad1c5f7d43fcc8afa
| 610
|
py
|
Python
|
soliket/__init__.py
|
itrharrison/SOLikeT-itrharrison
|
e8d92423ba433f15bda3a01463f357647e1ffa8c
|
[
"MIT"
] | 3
|
2021-01-14T17:35:23.000Z
|
2022-02-22T17:31:30.000Z
|
soliket/__init__.py
|
itrharrison/SOLikeT-itrharrison
|
e8d92423ba433f15bda3a01463f357647e1ffa8c
|
[
"MIT"
] | 35
|
2020-06-26T06:47:43.000Z
|
2022-03-31T12:13:07.000Z
|
soliket/__init__.py
|
itrharrison/SOLikeT-itrharrison
|
e8d92423ba433f15bda3a01463f357647e1ffa8c
|
[
"MIT"
] | 9
|
2020-11-20T11:03:32.000Z
|
2022-03-01T19:05:18.000Z
|
from .lensing import LensingLiteLikelihood, LensingLikelihood  # noqa: F401
from .gaussian import GaussianLikelihood, MultiGaussianLikelihood  # noqa: F401
from .ps import PSLikelihood, BinnedPSLikelihood  # noqa: F401
from .clusters import ClusterLikelihood  # noqa: F401
from .mflike import MFLike  # noqa: F401
from .xcorr import XcorrLikelihood  # noqa: F401

# pyCCL is an optional dependency: the CCL-backed likelihoods are exported
# only when it is importable.  (Removed a dead `pass` that followed the
# print in the except branch.)
try:
    import pyccl as ccl  # noqa: F401
    from .ccl import CCL  # noqa: F401
    from .cross_correlation import CrossCorrelationLikelihood  # noqa: F401
except ImportError:
    print('Skipping CCL module as pyCCL is not installed')
| 40.666667
| 79
| 0.765574
| 71
| 610
| 6.56338
| 0.478873
| 0.154506
| 0.180258
| 0.064378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.053785
| 0.177049
| 610
| 14
| 80
| 43.571429
| 0.874502
| 0.160656
| 0
| 0
| 0
| 0
| 0.089641
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.076923
| 0.769231
| 0
| 0.769231
| 0.076923
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
0e589c440d021472c7725a58b246dfb23a7e0951
| 156
|
py
|
Python
|
photos/admin.py
|
feddykip/gallery
|
b1f35d02de0de96470991a4ae776d356349f3ab7
|
[
"MIT"
] | null | null | null |
photos/admin.py
|
feddykip/gallery
|
b1f35d02de0de96470991a4ae776d356349f3ab7
|
[
"MIT"
] | null | null | null |
photos/admin.py
|
feddykip/gallery
|
b1f35d02de0de96470991a4ae776d356349f3ab7
|
[
"MIT"
] | null | null | null |
from django.contrib import admin

from .models import Photo, Category

# Expose the gallery models through the Django admin site.
for model in (Photo, Category):
    admin.site.register(model)
| 22.285714
| 35
| 0.807692
| 22
| 156
| 5.727273
| 0.545455
| 0.142857
| 0.269841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108974
| 156
| 7
| 36
| 22.285714
| 0.906475
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0e889607cb9b30e497f9cf317b5078abdb12a22d
| 85
|
py
|
Python
|
app/user/api/__init__.py
|
vanwt/cmdb
|
c1539140ab0a20d8e2be98e5d878b46848122316
|
[
"MIT"
] | 1
|
2019-12-15T05:20:42.000Z
|
2019-12-15T05:20:42.000Z
|
app/user/api/__init__.py
|
vanwt/cmdb
|
c1539140ab0a20d8e2be98e5d878b46848122316
|
[
"MIT"
] | 12
|
2020-02-12T03:10:46.000Z
|
2022-02-26T21:21:46.000Z
|
app/user/api/__init__.py
|
vanwt/cmdb
|
c1539140ab0a20d8e2be98e5d878b46848122316
|
[
"MIT"
] | null | null | null |
# Aggregate the user-facing API submodules so callers can import their
# public names directly from this package.
from .user import *
from .role import *
from .permission import *
from .menu import *
| 21.25
| 25
| 0.729412
| 12
| 85
| 5.166667
| 0.5
| 0.483871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 85
| 4
| 26
| 21.25
| 0.885714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0ea3675b5dfa67e2b44c3f150196fb4c31451825
| 73
|
py
|
Python
|
pyAssignment/Writers/__init__.py
|
CD3/pyAssignment
|
bf618457ff10542b1c1f334c89f48f1de72da32b
|
[
"MIT"
] | 1
|
2020-03-21T15:50:54.000Z
|
2020-03-21T15:50:54.000Z
|
pyAssignment/Writers/__init__.py
|
CD3/pyAssignment
|
bf618457ff10542b1c1f334c89f48f1de72da32b
|
[
"MIT"
] | 22
|
2018-03-24T15:04:35.000Z
|
2022-01-14T20:55:09.000Z
|
pyAssignment/Writers/__init__.py
|
CD3/pyAssignment
|
bf618457ff10542b1c1f334c89f48f1de72da32b
|
[
"MIT"
] | null | null | null |
# Re-export the public names of every Writer backend so callers can import
# them directly from pyAssignment.Writers.
from .Simple import *
from .BlackboardQuiz import *
from .Latex import *
| 18.25
| 29
| 0.753425
| 9
| 73
| 6.111111
| 0.555556
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164384
| 73
| 3
| 30
| 24.333333
| 0.901639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
7ebff8efda4ca960d4fd1988c7b85d7ee801a6ec
| 131,271
|
py
|
Python
|
perses/tests/testsystems.py
|
hannahbrucemacdonald/perses
|
6b43d200501e587b352dce5aaefef38e4145048b
|
[
"MIT"
] | null | null | null |
perses/tests/testsystems.py
|
hannahbrucemacdonald/perses
|
6b43d200501e587b352dce5aaefef38e4145048b
|
[
"MIT"
] | null | null | null |
perses/tests/testsystems.py
|
hannahbrucemacdonald/perses
|
6b43d200501e587b352dce5aaefef38e4145048b
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
"""
Test systems for perses automated design.
Examples
--------
Alanine dipeptide in various environments (vacuum, implicit, explicit):
>>> from perses.tests.testsystems import AlaninDipeptideSAMS
>>> testsystem = AlanineDipeptideTestSystem()
>>> system_generator = testsystem.system_generator['explicit']
>>> sams_sampler = testsystem.sams_sampler['explicit']
TODO
----
* Have all PersesTestSystem subclasses automatically subjected to a battery of tests.
* Add short descriptions to each class through a class property.
"""
# TODO: Use inexpensive charging methods for small molecules in tests
__author__ = 'John D. Chodera'
################################################################################
# IMPORTS
################################################################################
from simtk import openmm, unit
from simtk.openmm import app
import os, os.path
import sys, math
import numpy as np
from functools import partial
from pkg_resources import resource_filename
from openeye import oechem, oeshape, oeomega
from openmmtools import testsystems
from openmmtools import states
from openmmtools.mcmc import MCMCSampler, LangevinSplittingDynamicsMove
from perses.utils.smallmolecules import sanitizeSMILES, canonicalize_SMILES
from perses.storage import NetCDFStorage, NetCDFStorageView
from perses.rjmc.topology_proposal import OESMILES_OPTIONS
from perses.rjmc.geometry import FFAllAngleGeometryEngine
import tempfile
import copy
from openmmtools.constants import kB
from perses.rjmc.topology_proposal import SystemGenerator
from unittest import skipIf
from perses.dispersed.utils import minimize #updated minimizer
from openmmtools.states import ThermodynamicState, SamplerState
# TODO: Use dummy system generator to work around SystemGenerator issues
#from perses.rjmc.topology_proposal import DummySystemGenerator
#SystemGenerator = DummySystemGenerator
################################################################################
# TEST SYSTEMS
################################################################################
# True when running under Travis CI (the TRAVIS env var is set to 'true').
istravis = os.environ.get('TRAVIS', None) == 'true'
class PersesTestSystem(object):
    """
    Create a consistent set of samplers useful for testing.

    Properties
    ----------
    environments : list of str
        Available environments
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    themodynamic_states : dict of thermodynamic_states
        Themodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : MultiTargetDesign sampler
        Example MultiTargetDesign sampler
    """

    def __init__(self, storage_filename=None, mode='w', ncmc_nsteps=5, mcmc_nsteps=100):
        """Create a testsystem.

        Parameters
        ----------
        storage_filename : str, optional, default=None
            If specified, bind to this storage file.
        mode : str, optional, default='w'
            File open mode, 'w' for (over)write, 'a' for append.
        ncmc_nsteps : int, optional, default=5
            Number of NCMC switching steps (stored for use by subclasses).
        mcmc_nsteps : int, optional, default=100
            Number of MCMC steps (stored for use by subclasses).
        """
        self.storage = None
        if storage_filename is not None:
            # Bug fix: honor the caller-supplied `mode`.  It was previously
            # hard-coded to 'w', which made the documented append mode ('a')
            # silently impossible.
            self.storage = NetCDFStorage(storage_filename, mode=mode)
        # Per-environment containers, populated by subclasses.
        self.environments = list()
        self.topologies = dict()
        self.positions = dict()
        self.system_generators = dict()
        self.proposal_engines = dict()
        self.thermodynamic_states = dict()
        self.mcmc_samplers = dict()
        self.exen_samplers = dict()
        self.sams_samplers = dict()
        self.designer = None
        self.geometry_engine = FFAllAngleGeometryEngine(metadata={})
        # Shared Langevin splitting-move configuration used by subclasses.
        self._splitting = "V R O R V"
        self._timestep = 1.0*unit.femtosecond
        self._ncmc_nsteps = ncmc_nsteps
        self._mcmc_nsteps = mcmc_nsteps
        self._move = LangevinSplittingDynamicsMove(timestep=self._timestep, splitting=self._splitting, n_restart_attempts=10)
        # NOTE(review): n_restart_attempts is already passed to the
        # constructor above; this re-assignment looks redundant but is kept
        # to preserve behavior -- confirm against the move's API.
        self._move.n_restart_attempts = 10
class AlanineDipeptideTestSystem(PersesTestSystem):
    """
    Create a consistent set of SAMS samplers useful for testing PointMutationEngine on alanine dipeptide in various solvents.
    This is useful for testing a variety of components.

    Properties
    ----------
    environments : list of str
        Available environments: ['vacuum', 'explicit', 'implicit']
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    themodynamic_states : dict of thermodynamic_states
        Themodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : MultiTargetDesign sampler
        Example MultiTargetDesign sampler for implicit solvent hydration free energies

    Examples
    --------
    >>> from perses.tests.testsystems import AlanineDipeptideTestSystem
    >>> testsystem = AlanineDipeptideTestSystem()
    # Build a system
    >>> system = testsystem.system_generators['vacuum'].build_system(testsystem.topologies['vacuum'])
    # Retrieve a SAMSSampler
    >>> sams_sampler = testsystem.sams_samplers['implicit']
    """
    def __init__(self, constraints=app.HBonds, **kwargs):
        """Create the alanine dipeptide test system.

        Parameters
        ----------
        constraints : simtk.openmm.app constraint option, optional, default=app.HBonds
            Constraint specification forwarded to each SystemGenerator's
            forcefield_kwargs.
        **kwargs
            Forwarded to PersesTestSystem.__init__ (storage_filename, mode, ...).
        """
        super(AlanineDipeptideTestSystem, self).__init__(**kwargs)
        environments = ['explicit', 'implicit', 'vacuum']
        temperature = 300*unit.kelvin
        pressure = 1.0*unit.atmospheres
        # Use sterics in proposals
        self.geometry_engine.use_sterics = True
        # Write atom-by-atom geometry output.
        self.geometry_engine.write_proposal_pdb = True
        self.geometry_engine.pdb_filename_prefix = 'geometry'
        # Create a system generator for our desired forcefields.
        barostat = openmm.MonteCarloBarostat(pressure, temperature)
        system_generators = dict()
        system_generators['explicit'] = SystemGenerator(['amber99sbildn.xml', 'tip3p.xml'],
            forcefield_kwargs={ 'nonbondedMethod' : app.CutoffPeriodic, 'nonbondedCutoff' : 9.0 * unit.angstrom, 'implicitSolvent' : None, 'constraints' : constraints },
            use_antechamber=False, barostat=barostat)
        system_generators['implicit'] = SystemGenerator(['amber99sbildn.xml', 'amber99_obc.xml'],
            forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : app.OBC2, 'constraints' : constraints },
            use_antechamber=False)
        system_generators['vacuum'] = SystemGenerator(['amber99sbildn.xml'],
            forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : None, 'constraints' : constraints },
            use_antechamber=False)
        # Create peptide in solvent.
        from openmmtools.testsystems import AlanineDipeptideExplicit, AlanineDipeptideImplicit, AlanineDipeptideVacuum
        from pkg_resources import resource_filename
        pdb_filename = resource_filename('openmmtools', 'data/alanine-dipeptide-gbsa/alanine-dipeptide.pdb')
        from simtk.openmm.app import PDBFile
        topologies = dict()
        positions = dict()
        pdbfile = PDBFile(pdb_filename)
        # vacuum and implicit share the same (unsolvated) topology/positions.
        topologies['vacuum'] = pdbfile.getTopology()
        positions['vacuum'] = pdbfile.getPositions(asNumpy=True)
        topologies['implicit'] = pdbfile.getTopology()
        positions['implicit'] = pdbfile.getPositions(asNumpy=True)
        # Create molecule in explicit solvent.
        modeller = app.Modeller(topologies['vacuum'], positions['vacuum'])
        modeller.addSolvent(system_generators['explicit'].getForceField(), model='tip3p', padding=9.0*unit.angstrom)
        topologies['explicit'] = modeller.getTopology()
        positions['explicit'] = modeller.getPositions()
        # Set up the proposal engines.
        from perses.rjmc.topology_proposal import PointMutationEngine
        proposal_metadata = {
            'ffxmls' : ['amber99sbildn.xml'], # take sidechain definitions from this ffxml file
            'always_change' : True # don't propose self-transitions
            }
        proposal_engines = dict()
        # chain_id is a single space -- presumably the PDB's blank chain
        # identifier; confirm against the alanine-dipeptide PDB file.
        chain_id = ' '
        # Residue '2' (the alanine) may mutate to VAL, LEU, or ILE.
        allowed_mutations = [[('2','VAL')],[('2','LEU')],[('2','ILE')]]
        for environment in environments:
            proposal_engines[environment] = PointMutationEngine(topologies[environment],system_generators[environment], chain_id, proposal_metadata=proposal_metadata, allowed_mutations=allowed_mutations)
        # Generate systems
        systems = dict()
        for environment in environments:
            systems[environment] = system_generators[environment].build_system(topologies[environment])
        # Define thermodynamic state of interest.
        thermodynamic_states = dict()
        # Only the explicit-solvent state carries a pressure (NPT); the
        # others are NVT.
        thermodynamic_states['explicit'] = states.ThermodynamicState(system=systems['explicit'], temperature=temperature, pressure=pressure)
        thermodynamic_states['implicit'] = states.ThermodynamicState(system=systems['implicit'], temperature=temperature)
        thermodynamic_states['vacuum'] = states.ThermodynamicState(system=systems['vacuum'], temperature=temperature)
        # Create SAMS samplers
        from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
        mcmc_samplers = dict()
        exen_samplers = dict()
        sams_samplers = dict()
        for environment in environments:
            storage = None
            if self.storage:
                storage = NetCDFStorageView(self.storage, envname=environment)
            chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
            if environment == 'explicit':
                # Explicit solvent needs the periodic box vectors on the
                # sampler state.
                sampler_state = states.SamplerState(positions[environment], box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
            else:
                sampler_state = states.SamplerState(positions=positions[environment])
            mcmc_samplers[environment] = MCMCSampler(thermodynamic_states[environment], sampler_state, copy.deepcopy(self._move))
            # reduce number of steps for testing
            mcmc_samplers[environment].timestep = 1.0 * unit.femtoseconds
            exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps': 0}, storage=storage)
            exen_samplers[environment].verbose = True
            sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
            sams_samplers[environment].verbose = True
        # Create test MultiTargetDesign sampler.
        from perses.samplers.samplers import MultiTargetDesign
        # +1 implicit / -1 vacuum weights target an implicit-solvent
        # hydration free energy difference.
        target_samplers = { sams_samplers['implicit'] : 1.0, sams_samplers['vacuum'] : -1.0 }
        designer = MultiTargetDesign(target_samplers, storage=self.storage)
        designer.verbose = True
        # Store things.
        self.environments = environments
        self.topologies = topologies
        self.positions = positions
        self.systems = systems
        self.system_generators = system_generators
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
        self.sams_samplers = sams_samplers
        self.designer = designer
class AlanineDipeptideValenceTestSystem(PersesTestSystem):
"""
Create a consistent set of SAMS samplers useful for testing PointMutationEngine on alanine dipeptide in various solvents.
Only valence terms are included---no sterics.
Properties
----------
environments : list of str
Available environments: ['vacuum']
topologies : dict of simtk.openmm.app.Topology
Initial system Topology objects; topologies[environment] is the topology for `environment`
positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
Initial positions corresponding to initial Topology objects
system_generators : dict of SystemGenerator objects
SystemGenerator objects for environments
proposal_engines : dict of ProposalEngine
Proposal engines
themodynamic_states : dict of thermodynamic_states
Themodynamic states for each environment
mcmc_samplers : dict of MCMCSampler objects
MCMCSampler objects for environments
exen_samplers : dict of ExpandedEnsembleSampler objects
ExpandedEnsembleSampler objects for environments
sams_samplers : dict of SAMSSampler objects
SAMSSampler objects for environments
designer : MultiTargetDesign sampler
Example MultiTargetDesign sampler for implicit solvent hydration free energies
Examples
--------
>>> from perses.tests.testsystems import AlanineDipeptideValenceTestSystem
>>> testsystem = AlanineDipeptideValenceTestSystem()
# Build a system
>>> system = testsystem.system_generators['vacuum'].build_system(testsystem.topologies['vacuum'])
# Retrieve a SAMSSampler
>>> sams_sampler = testsystem.sams_samplers['vacuum']
"""
def __init__(self, **kwargs):
    """Set up the valence-only alanine dipeptide test system in vacuum.

    Builds the topology/positions from the PDB file shipped with openmmtools,
    a valence-only SystemGenerator, a PointMutationEngine proposing a single
    ALA->PHE mutation, and the MCMC/ExpandedEnsemble/SAMS sampler stack for
    the lone 'vacuum' environment, storing all of them on ``self``.

    Parameters
    ----------
    **kwargs
        Passed through to ``PersesTestSystem.__init__``.
    """
    super(AlanineDipeptideValenceTestSystem, self).__init__(**kwargs)
    environments = ['vacuum']
    # Disable atom-by-atom geometry output for speed.
    self.geometry_engine.write_proposal_pdb = False
    # Create a system generator for our desired (valence-only) forcefield.
    from pkg_resources import resource_filename
    system_generators = dict()
    valence_xml_filename = resource_filename('perses', 'data/amber99sbildn-valence-only.xml')
    system_generators['vacuum'] = SystemGenerator([valence_xml_filename],
        forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : None, 'constraints' : None },
        use_antechamber=False)
    # Load alanine dipeptide coordinates shipped with openmmtools.
    pdb_filename = resource_filename('openmmtools', 'data/alanine-dipeptide-gbsa/alanine-dipeptide.pdb')
    from simtk.openmm.app import PDBFile
    topologies = dict()
    positions = dict()
    pdbfile = PDBFile(pdb_filename)
    topologies['vacuum'] = pdbfile.getTopology()
    positions['vacuum'] = pdbfile.getPositions(asNumpy=True)
    # Set up the proposal engines.
    # NOTE(review): an earlier metadata dict carrying an 'ffxmls' entry was
    # dead code (immediately overwritten); only 'always_change' is passed.
    from perses.rjmc.topology_proposal import PointMutationEngine
    proposal_engines = dict()
    chain_id = ' '
    allowed_mutations = [[('2','PHE')]]
    proposal_metadata = {"always_change":True}
    for environment in environments:
        proposal_engines[environment] = PointMutationEngine(topologies[environment],system_generators[environment], chain_id, proposal_metadata=proposal_metadata, allowed_mutations=allowed_mutations, always_change=True)
    # Generate systems.
    systems = dict()
    for environment in environments:
        systems[environment] = system_generators[environment].build_system(topologies[environment])
    # Define thermodynamic state of interest (vacuum: temperature only).
    thermodynamic_states = dict()
    temperature = 300*unit.kelvin
    thermodynamic_states['vacuum'] = states.ThermodynamicState(system=systems['vacuum'], temperature=temperature)
    # Create SAMS samplers.
    from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
    mcmc_samplers = dict()
    exen_samplers = dict()
    sams_samplers = dict()
    for environment in environments:
        storage = None
        if self.storage:
            storage = NetCDFStorageView(self.storage, envname=environment)
        chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
        sampler_state = states.SamplerState(positions=positions[environment])
        mcmc_samplers[environment] = MCMCSampler(thermodynamic_states[environment], sampler_state, copy.deepcopy(self._move))
        # Reduced number of NCMC steps for testing.
        exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps':50}, storage=storage)
        exen_samplers[environment].verbose = True
        sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
        sams_samplers[environment].verbose = True
    # Create test MultiTargetDesign sampler over the single vacuum target.
    from perses.samplers.samplers import MultiTargetDesign
    target_samplers = { sams_samplers['vacuum'] : 1.0 }
    designer = MultiTargetDesign(target_samplers, storage=self.storage)
    designer.verbose = True
    # Store things.
    self.environments = environments
    self.topologies = topologies
    self.positions = positions
    self.systems = systems
    self.system_generators = system_generators
    self.proposal_engines = proposal_engines
    self.thermodynamic_states = thermodynamic_states
    self.mcmc_samplers = mcmc_samplers
    self.exen_samplers = exen_samplers
    self.sams_samplers = sams_samplers
    self.designer = designer
def load_via_pdbfixer(filename=None, pdbid=None, pH=7.0):
    """
    Load a PDB file via PDBFixer, keeping all heterogens and building in protons for any crystallographic waters.

    Parameters
    ----------
    filename : str, optional
        Path to a local PDB file to load.
    pdbid : str, optional
        PDB identifier to retrieve instead of a local file.
    pH : float, optional, default=7.0
        pH used when adding missing hydrogens (previously hard-coded to 7.0).

    Returns
    -------
    list
        [topology, positions] from the fixed structure.
    """
    from pdbfixer import PDBFixer
    fixer = PDBFixer(filename=filename, pdbid=pdbid)
    # Identify and repair missing/nonstandard residues and atoms before protonating.
    fixer.findMissingResidues()
    fixer.findNonstandardResidues()
    fixer.replaceNonstandardResidues()
    fixer.findMissingAtoms()
    fixer.addMissingAtoms()
    fixer.addMissingHydrogens(pH)
    return [fixer.topology, fixer.positions]
class T4LysozymeMutationTestSystem(PersesTestSystem):
    """
    Create a consistent set of SAMS samplers useful for testing PointMutationEngine on T4 lysozyme in various solvents.
    Wild Type is T4 L99A
    Properties
    ----------
    environments : list of str
        Available environments: ['vacuum', 'explicit', 'implicit']
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    themodynamic_states : dict of thermodynamic_states
        Themodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : MultiTargetDesign sampler
        Example MultiTargetDesign sampler for implicit solvent hydration free energies
    Examples
    --------
    >>> from perses.tests.testsystems import T4LysozymeTestSystem
    >>> testsystem = T4LysozymeTestSystem()
    # Build a system
    >>> system = testsystem.system_generators['vacuum'].build_system(testsystem.topologies['vacuum'])
    # Retrieve a SAMSSampler
    >>> sams_sampler = testsystem.sams_samplers['implicit']
    """
    def __init__(self, **kwargs):
        # Build receptor and benzene:receptor complex from PDB 181L, then the
        # proposal-engine/sampler stack for each solvated environment.
        super(T4LysozymeMutationTestSystem, self).__init__(**kwargs)
        # environments = ['explicit-complex', 'explicit-receptor', 'implicit-complex', 'implicit-receptor', 'vacuum-complex', 'vacuum-receptor']
        environments = ['explicit-complex', 'explicit-receptor', 'vacuum-complex', 'vacuum-receptor']
        temperature = 300*unit.kelvin
        pressure = 1.0*unit.atmospheres
        # Create a system generator for our desired forcefields.
        from pkg_resources import resource_filename
        gaff_xml_filename = resource_filename('perses', 'data/gaff.xml')
        barostat = openmm.MonteCarloBarostat(pressure, temperature)
        system_generators = dict()
        # Explicit solvent: periodic cutoff with a barostat for NPT sampling.
        system_generators['explicit'] = SystemGenerator([gaff_xml_filename,'amber99sbildn.xml', 'tip3p.xml'],
            forcefield_kwargs={ 'nonbondedMethod' : app.CutoffPeriodic, 'nonbondedCutoff' : 9.0 * unit.angstrom, 'implicitSolvent' : None, 'constraints' : None },
            use_antechamber=True, barostat=barostat)
        # Each component of a solvent shares a single SystemGenerator instance.
        system_generators['explicit-complex'] = system_generators['explicit']
        system_generators['explicit-receptor'] = system_generators['explicit']
        system_generators['implicit'] = SystemGenerator([gaff_xml_filename,'amber99sbildn.xml', 'amber99_obc.xml'],
            forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : app.OBC2, 'constraints' : None },
            use_antechamber=True)
        system_generators['implicit-complex'] = system_generators['implicit']
        system_generators['implicit-receptor'] = system_generators['implicit']
        system_generators['vacuum'] = SystemGenerator([gaff_xml_filename, 'amber99sbildn.xml'],
            forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : None, 'constraints' : None },
            use_antechamber=True)
        system_generators['vacuum-complex'] = system_generators['vacuum']
        system_generators['vacuum-receptor'] = system_generators['vacuum']
        # Create receptor in solvent from the bundled 181L structure.
        from pkg_resources import resource_filename
        pdb_filename = resource_filename('perses', 'data/181L.pdb')
        import pdbfixer
        from simtk.openmm.app import PDBFile, Modeller
        topologies = dict()
        positions = dict()
        [fixer_topology, fixer_positions] = load_via_pdbfixer(pdb_filename)
        modeller = Modeller(fixer_topology, fixer_positions)
        # Strip beta-mercaptoethanol, chloride, and crystallographic waters.
        residues_to_delete = [ residue for residue in modeller.getTopology().residues() if residue.name in ['HED','CL','HOH'] ]
        modeller.delete(residues_to_delete)
        receptor_modeller = copy.deepcopy(modeller)
        ligand_modeller = copy.deepcopy(modeller)
        # Run the loop to exhaustion so `chain` is bound to the LAST chain,
        # then delete it, leaving the receptor chain(s) only.
        for chain in receptor_modeller.getTopology().chains():
            pass
        chains_to_delete = [chain]
        receptor_modeller.delete(chains_to_delete)
        topologies['receptor'] = receptor_modeller.getTopology()
        positions['receptor'] = receptor_modeller.getPositions()
        # Break immediately so `chain` is bound to the FIRST chain (receptor),
        # then delete it, leaving only the ligand-containing chain.
        for chain in ligand_modeller.getTopology().chains():
            break
        chains_to_delete = [chain]
        ligand_modeller.delete(chains_to_delete)
        # Locate the benzene residue; `residue` is left bound to it by the break.
        for residue in ligand_modeller.getTopology().residues():
            if residue.name == 'BNZ':
                break
        from openmoltools import forcefield_generators
        from perses.utils.openeye import extractPositionsFromOEMol, giveOpenmmPositionsToOEMol
        import perses.rjmc.geometry as geometry
        from perses.rjmc.topology_proposal import TopologyProposal
        # create OEMol version of benzene
        mol = oechem.OEMol()
        #mol.SetTitle('BNZ') # should be set to residue.name in generateTopologyFromOEMol, not working
        oechem.OESmilesToMol(mol,'C1=CC=CC=C1')
        oechem.OEAddExplicitHydrogens(mol)
        oechem.OETriposAtomNames(mol)
        oechem.OETriposBondTypeNames(mol)
        new_residue = forcefield_generators.generateTopologyFromOEMol(mol)
        # Rename the generated residue(s) so they match the PDB's BNZ ligand.
        for res in new_residue.residues():
            res.name = 'BNZ'
        bnz_new_sys = system_generators['vacuum'].build_system(new_residue)
        kB = unit.BOLTZMANN_CONSTANT_kB * unit.AVOGADRO_CONSTANT_NA
        temperature = 300.0 * unit.kelvin
        kT = kB * temperature
        beta = 1.0/kT
        # Dummy proposal mapping the six benzene carbons onto themselves so the
        # geometry engine can place hydrogens on the crystallographic carbons.
        # NOTE(review): old_system is set to bnz_new_sys (the new benzene
        # system), not a system built from ligand_modeller.topology -- confirm
        # this is intentional.
        adding_hydrogen_proposal = TopologyProposal(new_topology=new_residue, new_system =bnz_new_sys, old_topology=ligand_modeller.topology, old_system =bnz_new_sys, logp_proposal = 0.0, new_to_old_atom_map = {0:0,1:1,2:2,3:3,4:4,5:5}, old_chemical_state_key='',new_chemical_state_key='')
        geometry_engine = geometry.FFAllAngleGeometryEngine()
        new_positions, logp = geometry_engine.propose(adding_hydrogen_proposal, ligand_modeller.positions, beta)
        # Assemble the complex: receptor plus hydrogenated benzene.
        modeller = copy.deepcopy(receptor_modeller)
        modeller.add(new_residue, new_positions)
        topologies['complex'] = modeller.getTopology()
        positions['complex'] = modeller.getPositions()
        # Create all environments: non-explicit environments reuse the
        # unsolvated topologies/positions directly.
        for environment in ['implicit', 'vacuum']:
            for component in ['receptor', 'complex']:
                topologies[environment + '-' + component] = topologies[component]
                positions[environment + '-' + component] = positions[component]
        # Set up in explicit solvent.
        for component in ['receptor', 'complex']:
            modeller = app.Modeller(topologies[component], positions[component])
            modeller.addSolvent(system_generators['explicit'].getForceField(), model='tip3p', padding=9.0*unit.angstrom)
            atoms = list(modeller.topology.atoms())
            print('Solvated %s has %s atoms' % (component, len(atoms)))
            topologies['explicit' + '-' + component] = modeller.getTopology()
            positions['explicit' + '-' + component] = modeller.getPositions()
        # Set up the proposal engines with a fixed menu of point mutations.
        allowed_mutations = [
            [('99','GLY')],
            [('102','GLN')],
            [('102','HIS')],
            [('102','GLU')],
            [('102','LEU')],
            [('153','ALA')],
            [('108','VAL')],
            [('99','GLY'),('108','VAL')]
        ]
        from perses.rjmc.topology_proposal import PointMutationEngine
        proposal_metadata = { 'ffxmls' : ['amber99sbildn.xml'] }
        proposal_engines = dict()
        chain_id = 'A'
        for environment in environments:
            proposal_engines[environment] = PointMutationEngine(topologies[environment], system_generators[environment], chain_id, proposal_metadata=proposal_metadata, allowed_mutations=allowed_mutations)
        # Generate systems
        systems = dict()
        for environment in environments:
            print(environment)
            systems[environment] = system_generators[environment].build_system(topologies[environment])
        # Define thermodynamic state of interest.
        thermodynamic_states = dict()
        for component in ['receptor', 'complex']:
            thermodynamic_states['explicit' + '-' + component] = states.ThermodynamicState(system=systems['explicit' + '-' + component], temperature=temperature, pressure=pressure)
            #thermodynamic_states['implicit' + '-' + component] = ThermodynamicState(system=systems['implicit' + '-' + component], temperature=temperature)
            thermodynamic_states['vacuum' + '-' + component] = states.ThermodynamicState(system=systems['vacuum' + '-' + component], temperature=temperature)
        # Create SAMS samplers
        from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
        mcmc_samplers = dict()
        exen_samplers = dict()
        sams_samplers = dict()
        for environment in environments:
            storage = None
            if self.storage:
                storage = NetCDFStorageView(self.storage, envname=environment)
            chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
            # Explicit-solvent environments also need box vectors.
            if environment[0:8] == 'explicit':
                sampler_state = states.SamplerState(positions=positions[environment], box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
            else:
                sampler_state = states.SamplerState(positions=positions[environment])
            mcmc_samplers[environment] = MCMCSampler(thermodynamic_states[environment], sampler_state, copy.deepcopy(self._move))
            # reduce number of steps for testing
            exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps':self._ncmc_nsteps, 'mcmc_nsteps':self._mcmc_nsteps}, storage=storage)
            exen_samplers[environment].verbose = True
            sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
            sams_samplers[environment].verbose = True
        # Create test MultiTargetDesign sampler: binding free energy target
        # (complex minus receptor) in explicit solvent.
        from perses.samplers.samplers import MultiTargetDesign
        target_samplers = { sams_samplers['explicit-complex'] : 1.0, sams_samplers['explicit-receptor'] : -1.0 }
        designer = MultiTargetDesign(target_samplers, storage=self.storage)
        designer.verbose = True
        # Store things.
        self.environments = environments
        self.topologies = topologies
        self.positions = positions
        self.systems = systems
        self.system_generators = system_generators
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
        self.sams_samplers = sams_samplers
        self.designer = designer
class MybTestSystem(PersesTestSystem):
    """
    Create a consistent set of SAMS samplers useful for testing PointMutationEngine on Myb:peptide interaction in various solvents.
    Properties
    ----------
    environments : list of str
        Available environments: ['vacuum', 'explicit', 'implicit']
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    thermodynamic_states : dict of thermodynamic_states
        Thermodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : MultiTargetDesign sampler
        Example MultiTargetDesign sampler for implicit solvent hydration free energies
    Examples
    --------
    >>> from perses.tests.testsystems import MybTestSystem
    >>> testsystem = MybTestSystem()
    # Build a system
    >>> system = testsystem.system_generators['vacuum-peptide'].build_system(testsystem.topologies['vacuum-peptide'])
    # Retrieve a SAMSSampler
    >>> sams_sampler = testsystem.sams_samplers['implicit-peptide']
    """
    def __init__(self, **kwargs):
        # Build peptide and Myb:peptide complex from PDB 1SB0, then the
        # proposal-engine/sampler stack for each solvated environment.
        super(MybTestSystem, self).__init__(**kwargs)
        environments = ['explicit-complex', 'explicit-peptide', 'implicit-complex', 'implicit-peptide', 'vacuum-complex', 'vacuum-peptide']
        temperature = 300*unit.kelvin
        pressure = 1.0*unit.atmospheres
        # Use sterics in proposals.
        self.geometry_engine.use_sterics = True
        # Write atom-by-atom geometry output.
        self.geometry_engine.write_proposal_pdb = True
        self.geometry_engine.pdb_filename_prefix = 'geometry'
        # Create a system generator for our desired forcefields.
        # NOTE(review): this barostat is never passed to the explicit
        # SystemGenerator here, unlike the sibling test systems -- confirm
        # whether NPT sampling was intended for the explicit environments.
        barostat = openmm.MonteCarloBarostat(pressure, temperature)
        system_generators = dict()
        system_generators['explicit'] = SystemGenerator(['amber99sbildn.xml', 'tip3p.xml'],
            forcefield_kwargs={ 'nonbondedMethod' : app.CutoffPeriodic, 'nonbondedCutoff' : 9.0 * unit.angstrom, 'implicitSolvent' : None, 'constraints' : None },
            use_antechamber=False)
        # Each component of a solvent shares a single SystemGenerator instance.
        system_generators['explicit-complex'] = system_generators['explicit']
        system_generators['explicit-peptide'] = system_generators['explicit']
        system_generators['implicit'] = SystemGenerator(['amber99sbildn.xml', 'amber99_obc.xml'],
            forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : app.OBC2, 'constraints' : None },
            use_antechamber=False)
        system_generators['implicit-complex'] = system_generators['implicit']
        system_generators['implicit-peptide'] = system_generators['implicit']
        system_generators['vacuum'] = SystemGenerator(['amber99sbildn.xml'],
            forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : None, 'constraints' : None },
            use_antechamber=False)
        system_generators['vacuum-complex'] = system_generators['vacuum']
        system_generators['vacuum-peptide'] = system_generators['vacuum']
        # Create peptide in solvent.
        from pkg_resources import resource_filename
        pdb_filename = resource_filename('perses', 'data/1sb0.pdb')
        from simtk.openmm.app import Modeller
        topologies = dict()
        positions = dict()
        [fixer_topology, fixer_positions] = load_via_pdbfixer(pdb_filename)
        topologies['complex'] = fixer_topology
        positions['complex'] = fixer_positions
        # Deleting chain A (Myb) from the complex leaves only the peptide.
        modeller = Modeller(topologies['complex'], positions['complex'])
        chains_to_delete = [ chain for chain in modeller.getTopology().chains() if chain.id == 'A' ] # remove chain A
        modeller.delete(chains_to_delete)
        topologies['peptide'] = modeller.getTopology()
        positions['peptide'] = modeller.getPositions()
        # Create all environments: non-explicit environments reuse the
        # unsolvated topologies/positions directly.
        for environment in ['implicit', 'vacuum']:
            for component in ['peptide', 'complex']:
                topologies[environment + '-' + component] = topologies[component]
                positions[environment + '-' + component] = positions[component]
        # Set up in explicit solvent.
        for component in ['peptide', 'complex']:
            modeller = app.Modeller(topologies[component], positions[component])
            modeller.addSolvent(system_generators['explicit'].getForceField(), model='tip3p', padding=9.0*unit.angstrom)
            topologies['explicit' + '-' + component] = modeller.getTopology()
            positions['explicit' + '-' + component] = modeller.getPositions()
        # Set up the proposal engines: each of four peptide residues may
        # mutate to any of thirteen amino acids.
        allowed_mutations = list()
        for resid in ['91', '99', '103', '105']:
            for resname in ['ALA', 'LEU', 'VAL', 'PHE', 'CYS', 'THR', 'TRP', 'TYR', 'GLU', 'ASP', 'LYS', 'ARG', 'ASN']:
                allowed_mutations.append([(resid, resname)])
        from perses.rjmc.topology_proposal import PointMutationEngine
        proposal_metadata = {
            'ffxmls' : ['amber99sbildn.xml'], # take sidechain definitions from this ffxml file
            'always_change' : True # don't propose self-transitions
        }
        proposal_engines = dict()
        chain_id = 'B'
        for environment in environments:
            proposal_engines[environment] = PointMutationEngine(topologies[environment], system_generators[environment], chain_id, proposal_metadata=proposal_metadata, allowed_mutations=allowed_mutations)
        # Generate systems.
        systems = dict()
        for environment in environments:
            systems[environment] = system_generators[environment].build_system(topologies[environment])
        # Define thermodynamic state of interest (pressure only for explicit).
        thermodynamic_states = dict()
        for component in ['peptide', 'complex']:
            thermodynamic_states['explicit' + '-' + component] = states.ThermodynamicState(system=systems['explicit' + '-' + component], temperature=temperature, pressure=pressure)
            thermodynamic_states['implicit' + '-' + component] = states.ThermodynamicState(system=systems['implicit' + '-' + component], temperature=temperature)
            thermodynamic_states['vacuum' + '-' + component] = states.ThermodynamicState(system=systems['vacuum' + '-' + component], temperature=temperature)
        # Create SAMS samplers.
        from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
        mcmc_samplers = dict()
        exen_samplers = dict()
        sams_samplers = dict()
        for environment in environments:
            storage = None
            if self.storage:
                storage = NetCDFStorageView(self.storage, envname=environment)
            chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
            # Explicit-solvent environments also need box vectors.
            if environment[0:8] == 'explicit':
                sampler_state = states.SamplerState(positions=positions[environment], box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
            else:
                sampler_state = states.SamplerState(positions=positions[environment])
            mcmc_samplers[environment] = MCMCSampler(thermodynamic_states[environment], sampler_state, copy.deepcopy(self._move))
            # Reduce number of steps for testing. (A stray '00' literal that
            # preceded this comment in the original has been removed.)
            exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps':0}, storage=storage)
            exen_samplers[environment].verbose = True
            sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
            sams_samplers[environment].verbose = True
        # Create test MultiTargetDesign sampler: vacuum binding-like target
        # (complex minus peptide).
        from perses.samplers.samplers import MultiTargetDesign
        target_samplers = { sams_samplers['vacuum-complex'] : 1.0, sams_samplers['vacuum-peptide'] : -1.0 }
        designer = MultiTargetDesign(target_samplers, storage=self.storage)
        designer.verbose = True
        # Store things.
        self.environments = environments
        self.topologies = topologies
        self.positions = positions
        self.systems = systems
        self.system_generators = system_generators
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
        self.sams_samplers = sams_samplers
        self.designer = designer
class AblImatinibResistanceTestSystem(PersesTestSystem):
    """
    Create a consistent set of SAMS samplers useful for testing PointMutationEngine on Abl:imatinib.
    Properties
    ----------
    environments : list of str
        Available environments: ['vacuum', 'explicit', 'implicit']
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    thermodynamic_states : dict of thermodynamic_states
        Thermodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : MultiTargetDesign sampler
        Example MultiTargetDesign sampler for implicit solvent hydration free energies
    Examples
    --------
    >>> from perses.tests.testsystems import AblImatinibResistanceTestSystem
    >>> testsystem = AblImatinibResistanceTestSystem()
    # Build a system
    >>> system = testsystem.system_generators['vacuum-inhibitor'].build_system(testsystem.topologies['vacuum-inhibitor'])
    # Retrieve a SAMSSampler
    >>> sams_sampler = testsystem.sams_samplers['vacuum-inhibitor']
    """
    def __init__(self, **kwargs):
        # Build receptor and Abl:imatinib complex from bundled PDB files, then
        # the proposal-engine/sampler stack for each solvated environment.
        super(AblImatinibResistanceTestSystem, self).__init__(**kwargs)
        solvents = ['vacuum', 'explicit'] # TODO: Add 'implicit' once GBSA parameterization for small molecules is working
        components = ['receptor', 'complex'] # TODO: Add 'ATP:kinase' complex to enable resistance design
        padding = 9.0*unit.angstrom
        explicit_solvent_model = 'tip3p'
        setup_path = 'data/abl-imatinib'
        temperature = 300*unit.kelvin
        pressure = 1.0*unit.atmospheres
        # Construct list of all environments.
        environments = list()
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                environments.append(environment)
        # Create a system generator for desired forcefields.
        from pkg_resources import resource_filename
        gaff_xml_filename = resource_filename('perses', 'data/gaff.xml')
        barostat = openmm.MonteCarloBarostat(pressure, temperature)
        system_generators = dict()
        system_generators['explicit'] = SystemGenerator([gaff_xml_filename, 'amber99sbildn.xml', 'tip3p.xml'],
            forcefield_kwargs={ 'nonbondedMethod' : app.CutoffPeriodic, 'nonbondedCutoff' : 9.0 * unit.angstrom, 'implicitSolvent' : None, 'constraints' : None },
            use_antechamber=True, barostat=barostat)
        system_generators['implicit'] = SystemGenerator([gaff_xml_filename, 'amber99sbildn.xml', 'amber99_obc.xml'],
            forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : app.OBC2, 'constraints' : None },
            use_antechamber=True)
        system_generators['vacuum'] = SystemGenerator([gaff_xml_filename, 'amber99sbildn.xml'],
            forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : None, 'constraints' : None },
            use_antechamber=True)
        # Copy system generators for all environments.
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                system_generators[environment] = system_generators[solvent]
        # Load topologies and positions for all components.
        from simtk.openmm.app import PDBFile, Modeller
        topologies = dict()
        positions = dict()
        for component in components:
            pdb_filename = resource_filename('perses', os.path.join(setup_path, '%s.pdb' % component))
            pdbfile = PDBFile(pdb_filename)
            topologies[component] = pdbfile.topology
            positions[component] = pdbfile.positions
        # Construct positions and topologies for all solvent environments.
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                if solvent == 'explicit':
                    # Solvate with the configured water model and padding.
                    modeller = app.Modeller(topologies[component], positions[component])
                    modeller.addSolvent(system_generators[solvent].getForceField(), model=explicit_solvent_model, padding=padding)
                    topologies[environment] = modeller.getTopology()
                    positions[environment] = modeller.getPositions()
                else:
                    environment = solvent + '-' + component
                    topologies[environment] = topologies[component]
                    positions[environment] = positions[component]
        # Set up resistance mutation proposal engines.
        allowed_mutations = list()
        # TODO: Expand this beyond the ATP binding site
        for resid in ['22', '37', '52', '55', '65', '81', '125', '128', '147', '148']:
            for resname in ['ALA', 'CYS', 'ASP', 'GLU', 'PHE', 'HIS', 'ILE', 'LYS', 'LEU', 'MET', 'ASN', 'PRO', 'GLN', 'ARG', 'SER', 'THR', 'VAL', 'TRP', 'TYR']:
                allowed_mutations.append([(resid, resname)])
        from perses.rjmc.topology_proposal import PointMutationEngine
        proposal_metadata = { 'ffxmls' : ['amber99sbildn.xml'] }
        proposal_engines = dict()
        chain_id = 'A'
        for solvent in solvents:
            for component in ['complex', 'receptor']: # Mutations only apply to components that contain the kinase
                environment = solvent + '-' + component
                proposal_engines[environment] = PointMutationEngine(topologies[environment], system_generators[environment], chain_id, proposal_metadata=proposal_metadata, allowed_mutations=allowed_mutations)
        # Generate systems for all environments.
        systems = dict()
        for environment in environments:
            systems[environment] = system_generators[environment].build_system(topologies[environment])
        # Create SAMS samplers.
        from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
        mcmc_samplers = dict()
        exen_samplers = dict()
        sams_samplers = dict()
        thermodynamic_states = dict()
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
                storage = None
                if self.storage:
                    storage = NetCDFStorageView(self.storage, envname=environment)
                # Explicit solvent gets NPT state and box vectors; others NVT.
                if solvent == 'explicit':
                    thermodynamic_state = states.ThermodynamicState(system=systems[environment], temperature=temperature, pressure=pressure)
                    sampler_state = states.SamplerState(positions=positions[environment], box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
                else:
                    thermodynamic_state = states.ThermodynamicState(system=systems[environment], temperature=temperature)
                    sampler_state = states.SamplerState(positions=positions[environment])
                mcmc_samplers[environment] = MCMCSampler(thermodynamic_state, sampler_state, copy.deepcopy(self._move))
                # Reduce number of steps for testing.
                # FIX: proposal engine and geometry engine were passed in swapped
                # order relative to every other ExpandedEnsembleSampler call in
                # this module; restored to (proposal_engine, geometry_engine).
                exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps':self._ncmc_nsteps, 'mcmc_nsteps':self._mcmc_nsteps}, storage=storage)
                exen_samplers[environment].verbose = True
                sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
                sams_samplers[environment].verbose = True
                thermodynamic_states[environment] = thermodynamic_state
        # Create test MultiTargetDesign sampler.
        # TODO: Replace this with inhibitor:kinase and ATP:kinase ratio
        from perses.samplers.samplers import MultiTargetDesign
        target_samplers = { sams_samplers['vacuum-complex'] : 1.0, sams_samplers['vacuum-receptor'] : -1.0 }
        designer = MultiTargetDesign(target_samplers, storage=self.storage)
        designer.verbose = True
        # Store things.
        self.components = components
        self.solvents = solvents
        self.environments = environments
        self.topologies = topologies
        self.positions = positions
        self.systems = systems
        self.system_generators = system_generators
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
        self.sams_samplers = sams_samplers
        self.designer = designer
        # This system must currently be minimized.
        minimize_wrapper(self)
class AblAffinityTestSystem(PersesTestSystem):
"""
Create a consistent set of SAMS samplers useful for optimizing kinase inhibitor affinity to Abl.
TODO: Generalize to standard inhibitor:protein test system and extend to T4 lysozyme small molecules.
Properties
----------
environments : list of str
Available environments: ['vacuum', 'explicit', 'implicit']
topologies : dict of simtk.openmm.app.Topology
Initial system Topology objects; topologies[environment] is the topology for `environment`
positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
Initial positions corresponding to initial Topology objects
system_generators : dict of SystemGenerator objects
SystemGenerator objects for environments
proposal_engines : dict of ProposalEngine
Proposal engines
themodynamic_states : dict of thermodynamic_states
Themodynamic states for each environment
mcmc_samplers : dict of MCMCSampler objects
MCMCSampler objects for environments
exen_samplers : dict of ExpandedEnsembleSampler objects
ExpandedEnsembleSampler objects for environments
sams_samplers : dict of SAMSSampler objects
SAMSSampler objects for environments
designer : MultiTargetDesign sampler
Example MultiTargetDesign sampler for implicit solvent hydration free energies
Examples
--------
>>> from perses.tests.testsystems import AblAffinityTestSystem
>>> testsystem = AblAffinityestSystem()
# Build a system
>>> system = testsystem.system_generators['vacuum-inhibitor'].build_system(testsystem.topologies['vacuum-inhibitor'])
# Retrieve a SAMSSampler
>>> sams_sampler = testsystem.sams_samplers['vacuum-inhibitor']
"""
def __init__(self, **kwargs):
    """Build topologies, systems, proposal engines, and samplers for every
    (solvent, component) environment, create a MultiTargetDesign sampler over
    the vacuum complex/inhibitor pair, and minimize all structures.

    Parameters
    ----------
    **kwargs
        Forwarded unchanged to the PersesTestSystem base-class initializer.
    """
    super(AblAffinityTestSystem, self).__init__(**kwargs)
    solvents = ['vacuum', 'explicit'] # TODO: Add 'implicit' once GBSA parameterization for small molecules is working
    solvents = ['vacuum'] # DEBUG
    # NOTE(review): the DEBUG override above discards 'explicit', so only the
    # vacuum environments are actually constructed below — confirm intended.
    components = ['inhibitor', 'complex'] # TODO: Add 'ATP:kinase' complex to enable resistance design
    padding = 9.0*unit.angstrom # NOTE(review): unused; addSolvent below hard-codes its padding
    explicit_solvent_model = 'tip3p' # NOTE(review): unused; 'tip3p' is hard-coded below
    setup_path = 'data/abl-imatinib'
    thermodynamic_states = dict()
    temperature = 300*unit.kelvin
    pressure = 1.0*unit.atmospheres
    # Construct list of all environments, named '<solvent>-<component>'.
    environments = list()
    for solvent in solvents:
        for component in components:
            environment = solvent + '-' + component
            environments.append(environment)
    # Read SMILES from CSV file of clinical kinase inhibitors.
    from pkg_resources import resource_filename
    smiles_filename = resource_filename('perses', 'data/clinical-kinase-inhibitors.csv')
    import csv
    molecules = list()
    with open(smiles_filename, 'r') as csvfile:
        csvreader = csv.reader(csvfile, delimiter=',', quotechar='"')
        for row in csvreader:
            name = row[0]   # molecule name column (not used further)
            smiles = row[1] # SMILES column; this is what is collected
            molecules.append(smiles)
    # Add current molecule
    molecules.append('Cc1ccc(cc1Nc2nccc(n2)c3cccnc3)NC(=O)c4ccc(cc4)C[NH+]5CCN(CC5)C')
    self.molecules = molecules
    # Expand molecules without explicit stereochemistry and make canonical isomeric SMILES.
    molecules = sanitizeSMILES(self.molecules)
    molecules = canonicalize_SMILES(molecules)
    # Create a system generator for desired forcefields
    from pkg_resources import resource_filename  # NOTE(review): duplicate of the import above
    gaff_xml_filename = resource_filename('perses', 'data/gaff.xml')
    barostat = openmm.MonteCarloBarostat(pressure, temperature)
    system_generators = dict()
    # Explicit solvent: periodic cutoff nonbonded with a barostat (NPT).
    system_generators['explicit'] = SystemGenerator([gaff_xml_filename, 'amber99sbildn.xml', 'tip3p.xml'],
        forcefield_kwargs={ 'nonbondedMethod' : app.CutoffPeriodic, 'nonbondedCutoff' : 9.0 * unit.angstrom, 'implicitSolvent' : None, 'constraints' : None },
        use_antechamber=True, barostat=barostat)
    # Implicit solvent: OBC2 GBSA with no cutoff.
    system_generators['implicit'] = SystemGenerator([gaff_xml_filename, 'amber99sbildn.xml', 'amber99_obc.xml'],
        forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : app.OBC2, 'constraints' : None },
        use_antechamber=True)
    # Vacuum: no cutoff, no solvent model.
    system_generators['vacuum'] = SystemGenerator([gaff_xml_filename, 'amber99sbildn.xml'],
        forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : None, 'constraints' : None },
        use_antechamber=True)
    # Copy system generators for all environments
    for solvent in solvents:
        for component in components:
            environment = solvent + '-' + component
            system_generators[environment] = system_generators[solvent]
    # Load topologies and positions for all components
    from simtk.openmm.app import PDBFile, Modeller
    topologies = dict()
    positions = dict()
    for component in components:
        pdb_filename = resource_filename('perses', os.path.join(setup_path, '%s.pdb' % component))
        print(pdb_filename)
        pdbfile = PDBFile(pdb_filename)
        topologies[component] = pdbfile.topology
        positions[component] = pdbfile.positions
    # Construct positions and topologies for all solvent environments
    for solvent in solvents:
        for component in components:
            environment = solvent + '-' + component
            if solvent == 'explicit':
                # Create MODELLER object.
                modeller = app.Modeller(topologies[component], positions[component])
                modeller.addSolvent(system_generators[solvent].getForceField(), model='tip3p', padding=9.0*unit.angstrom)
                topologies[environment] = modeller.getTopology()
                positions[environment] = modeller.getPositions()
            else:
                # Vacuum/implicit reuse the unsolvated component structures.
                environment = solvent + '-' + component
                topologies[environment] = topologies[component]
                positions[environment] = positions[component]
    # Set up the proposal engines.
    from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine
    proposal_metadata = { }  # NOTE(review): unused
    proposal_engines = dict()
    for environment in environments:
        storage = None
        if self.storage:
            storage = NetCDFStorageView(self.storage, envname=environment)
        proposal_engines[environment] = SmallMoleculeSetProposalEngine(molecules, system_generators[environment], residue_name='MOL', storage=storage)
    # Generate systems
    systems = dict()
    for environment in environments:
        systems[environment] = system_generators[environment].build_system(topologies[environment])
    # Define thermodynamic state of interest.
    thermodynamic_states = dict()
    for component in components:
        for solvent in solvents:
            environment = solvent + '-' + component
            if solvent == 'explicit':
                # Explicit solvent is simulated at constant pressure.
                thermodynamic_states[environment] = states.ThermodynamicState(system=systems[environment], temperature=temperature, pressure=pressure)
            else:
                thermodynamic_states[environment] = states.ThermodynamicState(system=systems[environment], temperature=temperature)
    # Create SAMS samplers
    from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
    mcmc_samplers = dict()
    exen_samplers = dict()
    sams_samplers = dict()
    for solvent in solvents:
        for component in components:
            environment = solvent + '-' + component
            chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
            storage = None
            if self.storage:
                storage = NetCDFStorageView(self.storage, envname=environment)
            if solvent == 'explicit':
                thermodynamic_state = states.ThermodynamicState(system=systems[environment], temperature=temperature, pressure=pressure)
                sampler_state = states.SamplerState(positions=positions[environment], box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
            else:
                thermodynamic_state = states.ThermodynamicState(system=systems[environment], temperature=temperature)
                sampler_state = states.SamplerState(positions=positions[environment])
            mcmc_samplers[environment] = MCMCSampler(thermodynamic_state, sampler_state, copy.deepcopy(self._move))
            # reduce number of steps for testing
            exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps':self._ncmc_nsteps, 'mcmc_nsteps':self._mcmc_nsteps}, storage=storage)
            exen_samplers[environment].verbose = True
            sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
            sams_samplers[environment].verbose = True
            thermodynamic_states[environment] = thermodynamic_state
    # Create test MultiTargetDesign sampler.
    # TODO: Replace this with inhibitor:kinase and ATP:kinase ratio
    from perses.samplers.samplers import MultiTargetDesign
    # Weights: +1 for vacuum-complex, -1 for vacuum-inhibitor.
    target_samplers = { sams_samplers['vacuum-complex'] : 1.0, sams_samplers['vacuum-inhibitor'] : -1.0 }
    designer = MultiTargetDesign(target_samplers, storage=self.storage)
    designer.verbose = True
    # Store things.
    self.molecules = molecules
    self.environments = environments
    self.topologies = topologies
    self.positions = positions
    self.system_generators = system_generators
    self.systems = systems
    self.proposal_engines = proposal_engines
    self.thermodynamic_states = thermodynamic_states
    self.mcmc_samplers = mcmc_samplers
    self.exen_samplers = exen_samplers
    self.sams_samplers = sams_samplers
    self.designer = designer
    # This system must currently be minimized.
    minimize_wrapper(self)
class AblImatinibProtonationStateTestSystem(PersesTestSystem):
    """
    Create a consistent set of SAMS samplers useful for sampling protonation states of the Abl:imatinib complex.

    Properties
    ----------
    environments : list of str
        Available environments: ['vacuum', 'explicit', 'implicit']
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    thermodynamic_states : dict of thermodynamic_states
        Thermodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : ProtonationStateSampler
        Constant-pH sampler coupling the explicit-complex expanded-ensemble
        sampler with the explicit-inhibitor SAMS sampler

    Examples
    --------
    >>> from perses.tests.testsystems import AblImatinibProtonationStateTestSystem
    >>> testsystem = AblImatinibProtonationStateTestSystem()
    # Build a system
    >>> system = testsystem.system_generators['explicit-inhibitor'].build_system(testsystem.topologies['explicit-inhibitor'])
    # Retrieve a SAMSSampler
    >>> sams_sampler = testsystem.sams_samplers['explicit-inhibitor']
    """
    def __init__(self, **kwargs):
        """Build all environments, samplers, and the constant-pH designer, then minimize.

        Parameters
        ----------
        **kwargs
            Forwarded unchanged to the PersesTestSystem base-class initializer.
        """
        super(AblImatinibProtonationStateTestSystem, self).__init__(**kwargs)
        solvents = ['vacuum', 'explicit'] # TODO: Add 'implicit' once GBSA parameterization for small molecules is working
        components = ['inhibitor', 'complex'] # TODO: Add 'ATP:kinase' complex to enable resistance design
        #solvents = ['vacuum'] # DEBUG: Just try vacuum for now
        #components = ['inhibitor'] # DEBUG: Just try inhibitor for now
        padding = 9.0*unit.angstrom # NOTE(review): unused; addSolvent below hard-codes its padding
        explicit_solvent_model = 'tip3p' # NOTE(review): unused; 'tip3p' is hard-coded below
        setup_path = 'data/constant-pH/abl-imatinib'
        thermodynamic_states = dict()
        temperature = 300*unit.kelvin
        pressure = 1.0*unit.atmospheres
        # Construct list of all environments, named '<solvent>-<component>'.
        environments = list()
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                environments.append(environment)
        # Read mol2 file containing protonation states and extract canonical isomeric SMILES from this.
        from pkg_resources import resource_filename
        molecules = list()
        mol2_filename = resource_filename('perses', os.path.join(setup_path, 'Imatinib-epik-charged.mol2'))
        ifs = oechem.oemolistream(mol2_filename)
        mol = oechem.OEMol()
        while oechem.OEReadMolecule(ifs, mol):
            smiles = oechem.OEMolToSmiles(mol)
            molecules.append(smiles)
        # Read log probabilities
        # NOTE(review): penalties are paired with molecules by file order —
        # assumes the .out file lists one value per mol2 record, in the same order.
        log_state_penalties = dict()
        state_penalties_filename = resource_filename('perses', os.path.join(setup_path, 'Imatinib-state-penalties.out'))
        for (smiles, log_state_penalty) in zip(molecules, np.fromfile(state_penalties_filename, sep='\n')):
            log_state_penalties[smiles] = log_state_penalty
        # Add current molecule
        smiles = 'Cc1ccc(cc1Nc2nccc(n2)c3cccnc3)NC(=O)c4ccc(cc4)C[NH+]5CCN(CC5)C'
        molecules.append(smiles)
        self.molecules = molecules
        log_state_penalties[smiles] = 100.0 # this should have zero weight
        # Expand molecules without explicit stereochemistry and make canonical isomeric SMILES.
        molecules = sanitizeSMILES(self.molecules)
        # Create a system generator for desired forcefields
        # TODO: Debug why we can't ue pregenerated molecule ffxml parameters. This may be an openmoltools issue.
        molecules_xml_filename = resource_filename('perses', os.path.join(setup_path, 'Imatinib-epik-charged.ffxml')) # NOTE(review): unused; see TODO above
        print('Creating system generators...')
        gaff_xml_filename = resource_filename('perses', 'data/gaff.xml')
        # NOTE(review): sibling classes use openmm.MonteCarloBarostat here;
        # the bare name requires MonteCarloBarostat to be imported at module
        # level — verify against the file's imports.
        barostat = MonteCarloBarostat(pressure, temperature)
        system_generators = dict()
        # Explicit solvent: periodic cutoff nonbonded with a barostat (NPT).
        system_generators['explicit'] = SystemGenerator([gaff_xml_filename, 'amber99sbildn.xml', 'tip3p.xml'],
            forcefield_kwargs={ 'nonbondedMethod' : app.CutoffPeriodic, 'nonbondedCutoff' : 9.0 * unit.angstrom, 'implicitSolvent' : None, 'constraints' : None },
            use_antechamber=True, barostat=barostat)
        # Implicit solvent: OBC2 GBSA with no cutoff.
        system_generators['implicit'] = SystemGenerator([gaff_xml_filename, 'amber99sbildn.xml', 'amber99_obc.xml'],
            forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : app.OBC2, 'constraints' : None },
            use_antechamber=True)
        # Vacuum: no cutoff, no solvent model.
        system_generators['vacuum'] = SystemGenerator([gaff_xml_filename, 'amber99sbildn.xml'],
            forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : None, 'constraints' : None },
            use_antechamber=True)
        # Copy system generators for all environments
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                system_generators[environment] = system_generators[solvent]
        # Load topologies and positions for all components
        from simtk.openmm.app import PDBFile, Modeller
        topologies = dict()
        positions = dict()
        for component in components:
            pdb_filename = resource_filename('perses', os.path.join(setup_path, '%s.pdb' % component))
            print(pdb_filename)
            pdbfile = PDBFile(pdb_filename)
            topologies[component] = pdbfile.topology
            positions[component] = pdbfile.positions
        # Construct positions and topologies for all solvent environments
        print('Constructing positions and topologies...')
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                if solvent == 'explicit':
                    # Create MODELLER object.
                    modeller = app.Modeller(topologies[component], positions[component])
                    modeller.addSolvent(system_generators[solvent].getForceField(), model='tip3p', padding=9.0*unit.angstrom)
                    topologies[environment] = modeller.getTopology()
                    positions[environment] = modeller.getPositions()
                else:
                    # Vacuum/implicit reuse the unsolvated component structures.
                    environment = solvent + '-' + component
                    topologies[environment] = topologies[component]
                    positions[environment] = positions[component]
                natoms = sum( 1 for atom in topologies[environment].atoms() )
                print("System '%s' has %d atoms" % (environment, natoms))
        # Set up the proposal engines.
        print('Initializing proposal engines...')
        from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine
        proposal_metadata = { }  # NOTE(review): unused
        proposal_engines = dict()
        for environment in environments:
            proposal_engines[environment] = SmallMoleculeSetProposalEngine(molecules, system_generators[environment], residue_name='MOL')
        # Generate systems
        print('Building systems...')
        systems = dict()
        for environment in environments:
            systems[environment] = system_generators[environment].build_system(topologies[environment])
        # Define thermodynamic state of interest.
        print('Defining thermodynamic states...')
        thermodynamic_states = dict()
        for component in components:
            for solvent in solvents:
                environment = solvent + '-' + component
                if solvent == 'explicit':
                    # Explicit solvent is simulated at constant pressure.
                    thermodynamic_states[environment] = states.ThermodynamicState(system=systems[environment], temperature=temperature, pressure=pressure)
                else:
                    thermodynamic_states[environment] = states.ThermodynamicState(system=systems[environment], temperature=temperature)
        # Create SAMS samplers
        print('Creating SAMS samplers...')
        from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
        mcmc_samplers = dict()
        exen_samplers = dict()
        sams_samplers = dict()
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
                storage = None
                if self.storage:
                    storage = NetCDFStorageView(self.storage, envname=environment)
                if solvent == 'explicit':
                    thermodynamic_state = states.ThermodynamicState(system=systems[environment], temperature=temperature, pressure=pressure)
                    sampler_state = states.SamplerState(positions=positions[environment], box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
                else:
                    thermodynamic_state = states.ThermodynamicState(system=systems[environment], temperature=temperature)
                    sampler_state = states.SamplerState(positions=positions[environment])
                mcmc_samplers[environment] = MCMCSampler(thermodynamic_state, sampler_state, copy.deepcopy(self._move))
                # reduce number of steps for testing
                exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps':self._ncmc_nsteps, 'mcmc_nsteps':self._mcmc_nsteps}, storage=storage)
                exen_samplers[environment].verbose = True
                sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
                sams_samplers[environment].verbose = True
                thermodynamic_states[environment] = thermodynamic_state
        # Create a constant-pH sampler
        from perses.samplers.samplers import ProtonationStateSampler
        designer = ProtonationStateSampler(complex_sampler=exen_samplers['explicit-complex'], solvent_sampler=sams_samplers['explicit-inhibitor'], log_state_penalties=log_state_penalties, storage=self.storage)
        designer.verbose = True
        # Store things.
        self.molecules = molecules
        self.environments = environments
        self.topologies = topologies
        self.positions = positions
        self.system_generators = system_generators
        self.systems = systems
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
        self.sams_samplers = sams_samplers
        self.designer = designer
        # This system must currently be minimized.
        minimize_wrapper(self)
        print('AblImatinibProtonationStateTestSystem initialized.')
class ImidazoleProtonationStateTestSystem(PersesTestSystem):
    """
    Create a consistent set of SAMS samplers useful for sampling protonation states of imidazole in water.

    Properties
    ----------
    environments : list of str
        Available environments: ['vacuum-imidazole', 'explicit-imidazole']
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    thermodynamic_states : dict of thermodynamic_states
        Thermodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : None
        No designer sampler is created for this test system

    Examples
    --------
    >>> from perses.tests.testsystems import ImidazoleProtonationStateTestSystem
    >>> testsystem = ImidazoleProtonationStateTestSystem()
    # Build a system
    >>> system = testsystem.system_generators['explicit-imidazole'].build_system(testsystem.topologies['explicit-imidazole'])
    # Retrieve a SAMSSampler
    >>> sams_sampler = testsystem.sams_samplers['explicit-imidazole']
    """
    def __init__(self, **kwargs):
        """Build all imidazole environments and their SAMS samplers.

        Unlike the sibling test systems, no designer sampler is created
        (``self.designer`` is set to None) and no minimization is performed.

        Parameters
        ----------
        **kwargs
            Forwarded unchanged to the PersesTestSystem base-class initializer.
        """
        super(ImidazoleProtonationStateTestSystem, self).__init__(**kwargs)
        solvents = ['vacuum', 'explicit'] # TODO: Add 'implicit' once GBSA parameterization for small molecules is working
        components = ['imidazole']
        padding = 9.0*unit.angstrom # NOTE(review): unused; addSolvent below hard-codes its padding
        explicit_solvent_model = 'tip3p' # NOTE(review): unused; 'tip3p' is hard-coded below
        setup_path = 'data/constant-pH/imidazole/'
        thermodynamic_states = dict()
        temperature = 300*unit.kelvin
        pressure = 1.0*unit.atmospheres
        # Construct list of all environments, named '<solvent>-<component>'.
        environments = list()
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                environments.append(environment)
        # Read mol2 file containing protonation states and extract canonical isomeric SMILES from this.
        from pkg_resources import resource_filename
        molecules = list()
        # NOTE(review): setup_path already ends in 'imidazole/', so this joins to
        # 'data/constant-pH/imidazole/imidazole/...' — confirm the data layout.
        mol2_filename = resource_filename('perses', os.path.join(setup_path, 'imidazole/imidazole-epik-charged.mol2'))
        ifs = oechem.oemolistream(mol2_filename)
        mol = oechem.OEMol()
        while oechem.OEReadMolecule(ifs, mol):
            smiles = oechem.OEMolToSmiles(mol)
            molecules.append(smiles)
        # Read log probabilities
        # NOTE(review): penalties are paired with molecules by file order —
        # assumes the .out file lists one value per mol2 record, in the same order.
        log_state_penalties = dict()
        state_penalties_filename = resource_filename('perses', os.path.join(setup_path, 'imidazole/imidazole-state-penalties.out'))
        for (smiles, log_state_penalty) in zip(molecules, np.fromfile(state_penalties_filename, sep='\n')):
            log_state_penalties[smiles] = log_state_penalty
        # Add current molecule
        smiles = 'C1=CN=CN1'
        molecules.append(smiles)
        self.molecules = molecules
        log_state_penalties[smiles] = 0.0
        # Expand molecules without explicit stereochemistry and make canonical isomeric SMILES.
        molecules = sanitizeSMILES(self.molecules)
        # Create a system generator for desired forcefields
        print('Creating system generators...')
        gaff_xml_filename = resource_filename('perses', 'data/gaff.xml')
        barostat = openmm.MonteCarloBarostat(pressure, temperature)
        system_generators = dict()
        # Explicit solvent: periodic cutoff nonbonded with a barostat (NPT).
        system_generators['explicit'] = SystemGenerator([gaff_xml_filename, 'amber99sbildn.xml', 'tip3p.xml'],
            forcefield_kwargs={ 'nonbondedMethod' : app.CutoffPeriodic, 'nonbondedCutoff' : 9.0 * unit.angstrom, 'implicitSolvent' : None, 'constraints' : None },
            use_antechamber=True, barostat=barostat)
        # Implicit solvent: OBC2 GBSA with no cutoff.
        system_generators['implicit'] = SystemGenerator([gaff_xml_filename, 'amber99sbildn.xml', 'amber99_obc.xml'],
            forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : app.OBC2, 'constraints' : None },
            use_antechamber=True)
        # Vacuum: no cutoff, no solvent model.
        system_generators['vacuum'] = SystemGenerator([gaff_xml_filename, 'amber99sbildn.xml'],
            forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : None, 'constraints' : None },
            use_antechamber=True)
        # Copy system generators for all environments
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                system_generators[environment] = system_generators[solvent]
        # Load topologies and positions for all components
        from simtk.openmm.app import PDBFile, Modeller
        topologies = dict()
        positions = dict()
        for component in components:
            pdb_filename = resource_filename('perses', os.path.join(setup_path, '%s.pdb' % component))
            print(pdb_filename)
            pdbfile = PDBFile(pdb_filename)
            topologies[component] = pdbfile.topology
            positions[component] = pdbfile.positions
        # Construct positions and topologies for all solvent environments
        print('Constructing positions and topologies...')
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                if solvent == 'explicit':
                    # Create MODELLER object.
                    modeller = app.Modeller(topologies[component], positions[component])
                    modeller.addSolvent(system_generators[solvent].getForceField(), model='tip3p', padding=9.0*unit.angstrom)
                    topologies[environment] = modeller.getTopology()
                    positions[environment] = modeller.getPositions()
                else:
                    # Vacuum/implicit reuse the unsolvated component structures.
                    environment = solvent + '-' + component
                    topologies[environment] = topologies[component]
                    positions[environment] = positions[component]
                natoms = sum( 1 for atom in topologies[environment].atoms() )
                print("System '%s' has %d atoms" % (environment, natoms))
                # DEBUG: Write initial PDB file
                outfile = open(environment + '.initial.pdb', 'w')
                PDBFile.writeFile(topologies[environment], positions[environment], file=outfile)
                outfile.close()
        # Set up the proposal engines.
        print('Initializing proposal engines...')
        residue_name = 'UNL' # TODO: Figure out residue name automatically
        from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine
        proposal_metadata = { }  # NOTE(review): unused
        proposal_engines = dict()
        for environment in environments:
            storage = None
            if self.storage is not None:
                storage = NetCDFStorageView(self.storage, envname=environment)
            proposal_engines[environment] = SmallMoleculeSetProposalEngine(molecules, system_generators[environment], residue_name=residue_name, storage=storage)
        # Generate systems
        print('Building systems...')
        systems = dict()
        for environment in environments:
            systems[environment] = system_generators[environment].build_system(topologies[environment])
        # Define thermodynamic state of interest.
        print('Defining thermodynamic states...')
        thermodynamic_states = dict()
        for component in components:
            for solvent in solvents:
                environment = solvent + '-' + component
                if solvent == 'explicit':
                    # Explicit solvent is simulated at constant pressure.
                    thermodynamic_states[environment] = states.ThermodynamicState(system=systems[environment], temperature=temperature, pressure=pressure)
                else:
                    thermodynamic_states[environment] = states.ThermodynamicState(system=systems[environment], temperature=temperature)
        # Create SAMS samplers
        print('Creating SAMS samplers...')
        from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
        mcmc_samplers = dict()
        exen_samplers = dict()
        sams_samplers = dict()
        for solvent in solvents:
            for component in components:
                environment = solvent + '-' + component
                chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
                storage = None
                if self.storage is not None:
                    storage = NetCDFStorageView(self.storage, envname=environment)
                if solvent == 'explicit':
                    thermodynamic_state = states.ThermodynamicState(system=systems[environment], temperature=temperature, pressure=pressure)
                    sampler_state = states.SamplerState(positions=positions[environment], box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
                else:
                    thermodynamic_state = states.ThermodynamicState(system=systems[environment], temperature=temperature)
                    sampler_state = states.SamplerState(positions=positions[environment])
                mcmc_samplers[environment] = MCMCSampler(thermodynamic_state, sampler_state, copy.deepcopy(self._move))
                # reduce number of steps for testing
                exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps':self._ncmc_nsteps, 'mcmc_nsteps':self._mcmc_nsteps}, storage=storage)
                exen_samplers[environment].verbose = True
                sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
                sams_samplers[environment].verbose = True
                thermodynamic_states[environment] = thermodynamic_state
        # Store things.
        self.molecules = molecules
        self.environments = environments
        self.topologies = topologies
        self.positions = positions
        self.system_generators = system_generators
        self.systems = systems
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
        self.sams_samplers = sams_samplers
        self.designer = None
        print('ImidazoleProtonationStateTestSystem initialized.')
def minimize_wrapper(testsystem):
    """
    Minimize the initial structure of every environment in a test system.

    The minimized positions are written back into ``testsystem.positions`` and
    installed as the sampler state of each environment's MCMC sampler.

    TODO
    ----
    Use sampler thermodynamic states instead of testsystem.systems

    Parameters
    ----------
    testsystem : PersesTestSystem
        The testsystem to minimize.
    """
    for environment in testsystem.environments:
        print("Minimizing '%s'..." % environment)
        # Minimization does not depend on temperature, so any value works here.
        minimization_state = ThermodynamicState(system=testsystem.systems[environment], temperature=300.0 * unit.kelvin)
        initial_state = SamplerState(positions=testsystem.positions[environment])
        minimize(minimization_state, initial_state)
        # Propagate the minimized coordinates back to the test system.
        testsystem.positions[environment] = initial_state.positions
        testsystem.mcmc_samplers[environment].sampler_state = initial_state
class SmallMoleculeLibraryTestSystem(PersesTestSystem):
    """
    Create a consistent set of samplers useful for testing SmallMoleculeProposalEngine on alkanes in various solvents.
    This is useful for testing a variety of components.

    Properties
    ----------
    environments : list of str
        Available environments: ['vacuum', 'explicit']
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    thermodynamic_states : dict of thermodynamic_states
        Thermodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : MultiTargetDesign sampler
        Example MultiTargetDesign sampler for explicit solvent hydration free energies
    molecules : list
        Molecules in library. Currently only SMILES format is supported.

    Examples
    --------
    >>> from perses.tests.testsystems import AlkanesTestSystem
    >>> testsystem = AlkanesTestSystem()
    # Build a system
    >>> system = testsystem.system_generators['vacuum'].build_system(testsystem.topologies['vacuum'])
    # Retrieve a SAMSSampler
    >>> sams_sampler = testsystem.sams_samplers['explicit']
    """
    def __init__(self, constraints=app.HBonds, premapped_json_dict=None, **kwargs):
        """Build vacuum and explicit-solvent environments for a SMILES library.

        Expects ``self.molecules`` (a list of SMILES strings) to have been set
        by the subclass before this initializer runs.

        Parameters
        ----------
        constraints : optional, default=app.HBonds
            Constraint scheme passed to both SystemGenerators.
        premapped_json_dict : optional, default=None
            If given, a JSON dict consumed by SmallMoleculeAtomMapper.from_json
            to build PremappedSmallMoleculeSetProposalEngine instances instead
            of SmallMoleculeSetProposalEngine.
        **kwargs
            Forwarded unchanged to the PersesTestSystem base-class initializer.
        """
        super(SmallMoleculeLibraryTestSystem, self).__init__(**kwargs)
        # Expand molecules without explicit stereochemistry and make canonical isomeric SMILES.
        molecules = sanitizeSMILES(self.molecules)
        molecules = canonicalize_SMILES(molecules)
        environments = ['explicit', 'vacuum']
        temperature = 300*unit.kelvin
        pressure = 1.0*unit.atmospheres
        # Create a system generator for our desired forcefields.
        from pkg_resources import resource_filename
        system_generators = dict()
        gaff_xml_filename = resource_filename('perses', 'data/gaff.xml')
        barostat = openmm.MonteCarloBarostat(pressure, temperature)
        system_generators['explicit'] = SystemGenerator([gaff_xml_filename, 'tip3p.xml'], use_antechamber=True,
            forcefield_kwargs={ 'nonbondedMethod' : app.CutoffPeriodic, 'nonbondedCutoff' : 9.0 * unit.angstrom, 'implicitSolvent' : None, 'constraints' : constraints }, barostat=barostat)
        system_generators['vacuum'] = SystemGenerator([gaff_xml_filename], use_antechamber=True,
            forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : None, 'constraints' : constraints })
        # Create topologies and positions
        topologies = dict()
        positions = dict()
        # # Parametrize and generate residue templates for small molecule set
        from openmoltools.forcefield_generators import generateForceFieldFromMolecules, generateTopologyFromOEMol, gaffTemplateGenerator
        from io import StringIO
        from perses.utils.openeye import smiles_to_oemol,extractPositionsFromOEMol
        forcefield = app.ForceField(gaff_xml_filename, 'tip3p.xml')
        # clinical_kinase_inhibitors_filename = resource_filename('perses', 'data/clinical-kinase-inhibitors.xml')
        # forcefield = app.ForceField(gaff_xml_filename, 'tip3p.xml', clinical-kinase-inhibitors_filename)
        from openmoltools import forcefield_generators ## IVY
        forcefield.registerTemplateGenerator(gaffTemplateGenerator) ## IVY
        # Map each SMILES to an OEMol titled 'MOL_<index>'.
        d_smiles_to_oemol = {smiles : smiles_to_oemol(smiles, "MOL_%d" % i)for i, smiles in enumerate(molecules)}
        # ffxml, failed_molecule_list = generateForceFieldFromMolecules(list(d_smiles_to_oemol.values()), ignoreFailures=True)
        #
        # f = open('clinical-kinase-inhibitors.xml', 'w')
        # f.write(ffxml)
        # f.close()
        #
        # if failed_molecule_list:
        #     raise Exception("Failed to generate forcefield for the following molecules: ", failed_molecule_list)
        # forcefield.loadFile(StringIO(ffxml))
        # Create molecule in vacuum.
        smiles = molecules[0] # current sampler state ## IVY add this back in
        # smiles = 'C5=C(C1=CN=CC=C1)N=C(NC2=C(C=CC(=C2)NC(C3=CC=C(C=C3)CN4CCN(CC4)C)=O)C)N=C5' ## IVY delete this Imatinib
        # smiles = 'Cc1ccc(cc1C#Cc2cnc3n2nccc3)C(=O)Nc4ccc(c(c4)C(F)(F)F)CN5CCN(CC5)C'
        # smiles = 'Cc1c2cnc(nc2n(c(=O)c1C(=O)C)C3CCCC3)Nc4ccc(cn4)N5CCNCC5' # palbociclib
        # smiles = 'Cc1c2cnc(nc2n(c(=O)c1C(=O)C)C3CCCC3)Nc4ccc(cn4)N5CCNCC5'
        # smiles = 'C[C@@H]1CCN(C[C@@H]1[N@](C)c2c3cc[nH]c3ncn2)C(=O)CC#N'
        # smiles = 'CC1=C(C=C(C=C1)NC2=NC=CC(=N2)N(C)C3=CC4=NN(C(=C4C=C3)C)C)S(=O)(=O)N' # Pazopanib
        print("smiles: ", smiles)
        # smiles = sanitizeSMILES([smiles])[0]
        # print("sanitized: ", smiles)
        # molecule = smiles_to_oemol(smiles, title=d_smiles_to_oemol[smiles].GetTitle())
        # NOTE(review): this rebuilds the OEMol with a default title rather than
        # reusing d_smiles_to_oemol[smiles] — confirm the title mismatch is benign.
        molecule = smiles_to_oemol(smiles)
        topologies['vacuum'] = generateTopologyFromOEMol(molecule)
        positions['vacuum'] = extractPositionsFromOEMol(molecule)
        # Create molecule in solvent.
        modeller = app.Modeller(topologies['vacuum'], positions['vacuum'])
        modeller.addSolvent(forcefield, model='tip3p', padding=9.0*unit.angstrom)
        topologies['explicit'] = modeller.getTopology()
        positions['explicit'] = modeller.getPositions()
        # Set up the proposal engines.
        from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine, PremappedSmallMoleculeSetProposalEngine, SmallMoleculeAtomMapper
        proposal_metadata = { }  # NOTE(review): unused
        proposal_engines = dict()
        if not premapped_json_dict:
            for environment in environments:
                proposal_engines[environment] = SmallMoleculeSetProposalEngine(molecules, system_generators[environment], residue_name=d_smiles_to_oemol[smiles].GetTitle())
        else:
            # Use precomputed atom maps when a premapped JSON dict is supplied.
            atom_mapper = SmallMoleculeAtomMapper.from_json(premapped_json_dict)
            for environment in environments:
                proposal_engines[environment] = PremappedSmallMoleculeSetProposalEngine(atom_mapper, system_generators[environment], residue_name=d_smiles_to_oemol[smiles].GetTitle())
        # Generate systems
        systems = dict()
        for environment in environments:
            systems[environment] = system_generators[environment].build_system(topologies[environment])
        # Define thermodynamic state of interest.
        thermodynamic_states = dict()
        thermodynamic_states['explicit'] = states.ThermodynamicState(system=systems['explicit'], temperature=temperature, pressure=pressure)
        thermodynamic_states['vacuum'] = states.ThermodynamicState(system=systems['vacuum'], temperature=temperature)
        # Create SAMS samplers
        from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
        mcmc_samplers = dict()
        exen_samplers = dict()
        sams_samplers = dict()
        for environment in environments:
            storage = None
            if self.storage:
                storage = NetCDFStorageView(self.storage, envname=environment)
            chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
            if environment == 'explicit':
                sampler_state = states.SamplerState(positions=positions[environment], box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
            else:
                sampler_state = states.SamplerState(positions=positions[environment])
            mcmc_samplers[environment] = MCMCSampler(thermodynamic_states[environment], sampler_state, copy.deepcopy(self._move))
            # reduce number of steps for testing
            exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps':self._ncmc_nsteps}, storage=storage)
            exen_samplers[environment].verbose = True
            sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
            sams_samplers[environment].verbose = True
        # Create test MultiTargetDesign sampler.
        from perses.samplers.samplers import MultiTargetDesign
        target_samplers = { sams_samplers['explicit'] : 1.0, sams_samplers['vacuum'] : -1.0 }
        designer = MultiTargetDesign(target_samplers, storage=self.storage)
        # Store things.
        # NOTE(review): unlike the sibling test systems, `systems` is never
        # stored as self.systems here, so minimize_wrapper cannot be used on
        # this class — confirm whether that attribute should be added.
        self.molecules = molecules
        self.environments = environments
        self.topologies = topologies
        self.positions = positions
        self.system_generators = system_generators
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
        self.sams_samplers = sams_samplers
        self.designer = designer
class AlkanesTestSystem(SmallMoleculeLibraryTestSystem):
    """
    Library of small alkanes in various solvent environments.

    Populates the molecule library with linear alkanes (propane through
    hexane) as SMILES and defers all sampler setup to the base class.
    """
    def __init__(self, **kwargs):
        # 'C'*n spells the SMILES for the linear alkane with n carbons:
        # CCC, CCCC, CCCCC, CCCCCC (propane .. hexane)
        self.molecules = ['C' * n_carbons for n_carbons in range(3, 7)]
        super(AlkanesTestSystem, self).__init__(**kwargs)
class KinaseInhibitorsTestSystem(SmallMoleculeLibraryTestSystem):
    """
    Library of clinical kinase inhibitors in various solvent environments. This is often problematic.
    """
    def __init__(self, **kwargs):
        # Load the SMILES column from the packaged CSV of clinical kinase
        # inhibitors; rows are (name, smiles) and only SMILES is retained.
        from pkg_resources import resource_filename
        import csv
        smiles_filename = resource_filename('perses', 'data/clinical-kinase-inhibitors.csv')
        with open(smiles_filename, 'r') as csvfile:
            reader = csv.reader(csvfile, delimiter=',', quotechar='"')
            self.molecules = [row[1] for row in reader]
        # Initialize the base class, which builds all samplers.
        super(KinaseInhibitorsTestSystem, self).__init__(**kwargs)
#TODO fix this test system
class T4LysozymeInhibitorsTestSystem(SmallMoleculeLibraryTestSystem):
    """
    Library of T4 lysozyme L99A inhibitors in various solvent environments.
    """
    def read_smiles(self, filename):
        """Read SMILES from a tab-delimited file of (name, smiles, reference) rows."""
        import csv
        collected = []
        with open(filename, 'r') as csvfile:
            reader = csv.reader(csvfile, delimiter='\t', quotechar='"')
            for row in reader:
                # Touch all three expected columns; keep only the SMILES.
                name, smiles, reference = row[0], row[1], row[2]
                collected.append(smiles)
        return collected
    def __init__(self, **kwargs):
        from pkg_resources import resource_filename
        # Combine the binder and non-binder libraries into one list.
        binders = self.read_smiles(resource_filename('perses', 'data/L99A-binders.txt'))
        non_binders = self.read_smiles(resource_filename('perses', 'data/L99A-non-binders.txt'))
        self.molecules = binders + non_binders
        # Initialize the base class, which builds all samplers.
        super(T4LysozymeInhibitorsTestSystem, self).__init__(**kwargs)
class FusedRingsTestSystem(SmallMoleculeLibraryTestSystem):
    """
    Simple test system containing fused rings (benzene <--> naphtalene) in explicit solvent.
    """
    def __init__(self, **kwargs):
        # Two-molecule library: benzene and naphthalene, as aromatic SMILES.
        benzene = 'c1ccccc1'
        naphthalene = 'c1ccc2ccccc2c1'
        self.molecules = [benzene, naphthalene]
        super(FusedRingsTestSystem, self).__init__(**kwargs)
class ValenceSmallMoleculeLibraryTestSystem(PersesTestSystem):
    """
    Create a consistent set of samplers useful for testing SmallMoleculeProposalEngine on alkanes with a valence-only forcefield.

    Properties
    ----------
    environments : list of str
        Available environments: ['vacuum']
    topologies : dict of simtk.openmm.app.Topology
        Initial system Topology objects; topologies[environment] is the topology for `environment`
    positions : dict of simtk.unit.Quantity of [nparticles,3] with units compatible with nanometers
        Initial positions corresponding to initial Topology objects
    system_generators : dict of SystemGenerator objects
        SystemGenerator objects for environments
    proposal_engines : dict of ProposalEngine
        Proposal engines
    thermodynamic_states : dict of thermodynamic_states
        Thermodynamic states for each environment
    mcmc_samplers : dict of MCMCSampler objects
        MCMCSampler objects for environments
    exen_samplers : dict of ExpandedEnsembleSampler objects
        ExpandedEnsembleSampler objects for environments
    sams_samplers : dict of SAMSSampler objects
        SAMSSampler objects for environments
    designer : MultiTargetDesign sampler
        Example MultiTargetDesign sampler for explicit solvent hydration free energies
    molecules : list
        Molecules in library. Currently only SMILES format is supported.

    Examples
    --------
    >>> from perses.tests.testsystems import ValenceSmallMoleculeLibraryTestSystem
    >>> testsystem = ValenceSmallMoleculeLibraryTestSystem()
    # Build a system
    >>> system = testsystem.system_generators['vacuum'].build_system(testsystem.topologies['vacuum'])
    # Retrieve a SAMSSampler
    >>> sams_sampler = testsystem.sams_samplers['vacuum']
    """
    def __init__(self, **kwargs):
        super(ValenceSmallMoleculeLibraryTestSystem, self).__init__(**kwargs)
        # Note: the list intentionally contains duplicate/equivalent SMILES;
        # canonicalization maps them onto canonical isomeric SMILES.
        initial_molecules = ['CCCCC','CC(C)CC', 'CCC(C)C', 'CCCCC', 'C(CC)CCC']
        molecules = self._canonicalize_smiles(initial_molecules)
        environments = ['vacuum']

        # Create a system generator for our desired forcefields (valence terms only).
        system_generators = dict()
        from pkg_resources import resource_filename
        gaff_xml_filename = resource_filename('perses', 'data/gaff-valence-only.xml')
        system_generators['vacuum'] = SystemGenerator([gaff_xml_filename],
            forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : None, 'constraints' : None })

        #
        # Create topologies and positions
        #
        topologies = dict()
        positions = dict()

        from openmoltools import forcefield_generators
        forcefield = app.ForceField(gaff_xml_filename, 'tip3p.xml')
        forcefield.registerTemplateGenerator(forcefield_generators.gaffTemplateGenerator)

        # Create molecule in vacuum; the first library molecule defines the
        # initial sampler state.
        from perses.utils.openeye import smiles_to_oemol,extractPositionsFromOEMol
        smiles = molecules[0] # current sampler state
        molecule = smiles_to_oemol(smiles)
        topologies['vacuum'] = forcefield_generators.generateTopologyFromOEMol(molecule)
        positions['vacuum'] = extractPositionsFromOEMol(molecule)

        # Set up the proposal engines.
        from perses.rjmc.topology_proposal import SmallMoleculeSetProposalEngine
        proposal_metadata = { }
        proposal_engines = dict()
        for environment in environments:
            proposal_engines[environment] = SmallMoleculeSetProposalEngine(molecules, system_generators[environment])

        # Generate systems
        systems = dict()
        for environment in environments:
            systems[environment] = system_generators[environment].build_system(topologies[environment])

        # Define thermodynamic state of interest.
        thermodynamic_states = dict()
        temperature = 300*unit.kelvin
        pressure = 1.0*unit.atmospheres
        thermodynamic_states['vacuum'] = states.ThermodynamicState(system=systems['vacuum'], temperature=temperature)

        # Create SAMS samplers
        from perses.samplers.samplers import ExpandedEnsembleSampler, SAMSSampler
        mcmc_samplers = dict()
        exen_samplers = dict()
        sams_samplers = dict()
        for environment in environments:
            storage = None
            if self.storage:
                storage = NetCDFStorageView(self.storage, envname=environment)
            chemical_state_key = proposal_engines[environment].compute_state_key(topologies[environment])
            if environment == 'explicit':
                sampler_state = states.SamplerState(positions=positions[environment], box_vectors=systems[environment].getDefaultPeriodicBoxVectors())
            else:
                sampler_state = states.SamplerState(positions=positions[environment])
            mcmc_samplers[environment] = MCMCSampler(thermodynamic_states[environment], sampler_state, copy.deepcopy(self._move))
            # reduce number of steps for testing
            # (a stray no-op '00' literal that preceded this line was removed)
            exen_samplers[environment] = ExpandedEnsembleSampler(mcmc_samplers[environment], topologies[environment], chemical_state_key, proposal_engines[environment], self.geometry_engine, options={'nsteps':0}, storage=storage)
            exen_samplers[environment].verbose = True
            sams_samplers[environment] = SAMSSampler(exen_samplers[environment], storage=storage)
            sams_samplers[environment].verbose = True

        # Create test MultiTargetDesign sampler.
        # NOTE(review): both dict entries use the same key (sams_samplers['vacuum']),
        # so this literal collapses to { sams_samplers['vacuum'] : -1.0 }. This looks
        # like a copy/paste from a two-environment (explicit/vacuum) system -- confirm
        # intended weights. Behavior is preserved as-is.
        from perses.samplers.samplers import MultiTargetDesign
        target_samplers = { sams_samplers['vacuum'] : 1.0, sams_samplers['vacuum'] : -1.0 }
        designer = MultiTargetDesign(target_samplers, storage=self.storage)

        # Store things.
        self.molecules = molecules
        self.environments = environments
        self.topologies = topologies
        self.positions = positions
        self.system_generators = system_generators
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
        self.sams_samplers = sams_samplers
        self.designer = designer

    def _canonicalize_smiles(self, list_of_smiles):
        """
        Turn a list of smiles strings into openeye canonical
        isomeric smiles.

        Parameters
        ----------
        list_of_smiles : list of str
            input smiles

        Returns
        -------
        list_of_canonicalized_smiles : list of str
            canonical isomeric smiles
        """
        # Removed a leftover DEBUG oechem.oemolostream('current.mol2') that
        # created an empty file as a side effect and was never written to.
        list_of_canonicalized_smiles = []
        for smiles in list_of_smiles:
            mol = oechem.OEMol()
            oechem.OESmilesToMol(mol, smiles)
            oechem.OEAddExplicitHydrogens(mol)
            can_smi = oechem.OECreateSmiString(mol, OESMILES_OPTIONS)
            list_of_canonicalized_smiles.append(can_smi)
        return list_of_canonicalized_smiles
class NullTestSystem(PersesTestSystem):
    """
    Test turning a small molecule into itself in vacuum

    Currently only trying to test ExpandedEnsemble sampler, therefore
    SAMS sampler and MultiTargetDesign are not implemented at this time

    Uses a custom ProposalEngine to only match subset of atoms, requiring
    geometry to build in the rest

    geometry_engine.write_proposal_pdb set to False

    Constructor:
        NullTestSystem(storage_filename="null.nc", exen_pdb_filename=None)

    Arguments:
        storage_filename, OPTIONAL, string
            Default is "null.nc"
            Storage must be provided in order to analyze testsystem acceptance rates
        exen_pdb_filename, OPTIONAL, string
            Default is None
            If value is not None, will write pdbfile after every ExpandedEnsemble
            iteration
        scheme, OPTIONAL, string
            Default is 'ncmc-geometry-ncmc'
            Scheme to be used by ExpandedEnsembleSampler
            Must be in ['geometry-ncmc-geometry','ncmc-geometry-ncmc','geometry-ncmc']
            Default will run NCMC on old and new system separately

    Only one environment ('vacuum') is currently implemented; however all
    samplers are saved in dictionaries for consistency with other testsystems

    NOTE(review): subclasses must set `self.NullProposal` (a ProposalEngine
    class) and `self.mol_name` (an IUPAC name string) BEFORE calling this
    __init__ -- it reads both to build the proposal engine and molecule.
    """
    def __init__(self, storage_filename="null.nc", exen_pdb_filename=None, scheme='ncmc-geometry-ncmc', options=None):
        super(NullTestSystem, self).__init__(storage_filename=storage_filename)

        # Default to instantaneous (0-step) NCMC unless the caller specifies nsteps.
        if options is None:
            options = {'nsteps':0}
        if 'nsteps' not in options.keys():
            options['nsteps'] = 0

        environments = ['vacuum', 'explicit']
        # self.geometry_engine.write_proposal_pdb = True

        # Per-environment containers populated by the loop below.
        system_generators = dict()
        topologies = dict()
        positions = dict()
        proposal_engines = dict()
        thermodynamic_states = dict()
        mcmc_samplers = dict()
        exen_samplers = dict()

        from perses.tests.utils import oemol_to_omm_ff, get_data_filename
        from openmoltools.openeye import iupac_to_oemol,generate_conformers
        from perses.samplers.samplers import ExpandedEnsembleSampler

        for key in environments:
            gaff_xml_filename = get_data_filename('data/gaff.xml')
            # Forcefield setup differs per environment: vacuum uses no cutoff and
            # no constraints; explicit adds TIP3P water with a periodic cutoff.
            if key == "vacuum":
                forcefield_kwargs = {'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : None, 'constraints' : None}
                ff_list = [gaff_xml_filename]
            if key == "explicit":
                ff_list = [gaff_xml_filename, 'tip3p.xml']
                forcefield_kwargs={ 'nonbondedMethod' : app.CutoffPeriodic, 'nonbondedCutoff' : 9.0 * unit.angstrom, 'implicitSolvent' : None, 'constraints' : app.HBonds }
            system_generator = SystemGenerator(ff_list, forcefield_kwargs=forcefield_kwargs)
            system_generators[key] = system_generator

            # Null proposal engine supplied by the subclass (naphthalene/butane/propane).
            proposal_engine = self.NullProposal(system_generator, residue_name=self.mol_name)

            # Build the initial molecule from its IUPAC name with a single conformer,
            # then parameterize it into an OpenMM system/topology/positions triple.
            initial_molecule = iupac_to_oemol(iupac_name=self.mol_name)
            initial_molecule = generate_conformers(initial_molecule,max_confs=1)
            initial_system, initial_positions, initial_topology = oemol_to_omm_ff(initial_molecule, self.mol_name)

            if key == "explicit":
                # Solvate the vacuum topology and rebuild the system for the
                # solvated topology.
                modeller = app.Modeller(initial_topology, initial_positions)
                modeller.addSolvent(system_generators[key].getForceField(), model='tip3p', padding=9.0*unit.angstrom)
                initial_topology = modeller.getTopology()
                initial_positions = modeller.getPositions()
                initial_system = system_generators[key].build_system(initial_topology)

            # Tag the topology with the first of the two fake chemical states so
            # compute_state_key below returns a well-defined starting state.
            initial_topology._state_key = proposal_engine._fake_states[0]

            temperature = 300*unit.kelvin
            thermodynamic_state = states.ThermodynamicState(system=initial_system, temperature=temperature)
            chemical_state_key = proposal_engine.compute_state_key(initial_topology)
            sampler_state = states.SamplerState(positions=initial_positions)

            # MCMC sampler: 500 steps of the configured move at a 1 fs timestep.
            mcmc_sampler = MCMCSampler(thermodynamic_state, sampler_state, copy.deepcopy(self._move))
            mcmc_sampler.nsteps = 500
            mcmc_sampler.timestep = 1.0*unit.femtosecond
            mcmc_sampler.verbose = True

            exen_sampler = ExpandedEnsembleSampler(mcmc_sampler, initial_topology, chemical_state_key, proposal_engine, self.geometry_engine, options=options, storage=self.storage)
            exen_sampler.verbose = True
            if exen_pdb_filename is not None:
                exen_sampler.pdbfile = open(exen_pdb_filename,'w')

            topologies[key] = initial_topology
            positions[key] = initial_positions
            proposal_engines[key] = proposal_engine
            thermodynamic_states[key] = thermodynamic_state
            mcmc_samplers[key] = mcmc_sampler
            exen_samplers[key] = exen_sampler

        # save
        self.environments = environments
        self.storage_filename = storage_filename
        self.system_generators = system_generators
        self.topologies = topologies
        self.positions = positions
        self.proposal_engines = proposal_engines
        self.thermodynamic_states = thermodynamic_states
        self.mcmc_samplers = mcmc_samplers
        self.exen_samplers = exen_samplers
class NaphthaleneTestSystem(NullTestSystem):
    """
    Test turning Naphthalene into Naphthalene in vacuum.

    Only the ExpandedEnsemble sampler is exercised; SAMS and
    MultiTargetDesign are not implemented for this system. A custom
    ProposalEngine matches only one ring, forcing the geometry engine
    to rebuild the other.

    Arguments:
        storage_filename, OPTIONAL, string
            Default is "naphthalene.nc". Storage must be provided in order
            to analyze testsystem acceptance rates.
        exen_pdb_filename, OPTIONAL, string
            Default is None. If not None, a pdbfile is written after every
            ExpandedEnsemble iteration.
        scheme, OPTIONAL, string
            Default is 'geometry-ncmc-geometry' (hybrid NCMC). Must be in
            ['geometry-ncmc-geometry','ncmc-geometry-ncmc','geometry-ncmc'].

    Only one environment ('vacuum') is currently implemented; however all
    samplers are saved in dictionaries for consistency with other testsystems.
    """
    def __init__(self, storage_filename="naphthalene.nc", exen_pdb_filename=None, scheme='geometry-ncmc-geometry', options=None):
        """
        __init__(self, storage_filename="naphthalene.nc", exen_pdb_filename=None, scheme='geometry-ncmc-geometry'):
        """
        from perses.rjmc.topology_proposal import NaphthaleneProposalEngine
        # Must be set before the base constructor runs: NullTestSystem.__init__
        # reads NullProposal and mol_name to build the proposal engine.
        self.NullProposal = NaphthaleneProposalEngine
        self.mol_name = 'naphthalene'
        super(NaphthaleneTestSystem, self).__init__(
            storage_filename=storage_filename,
            exen_pdb_filename=exen_pdb_filename,
            scheme=scheme,
            options=options)
class ButaneTestSystem(NullTestSystem):
    """
    Test turning Butane into Butane in vacuum.

    Only the ExpandedEnsemble sampler is exercised; SAMS and
    MultiTargetDesign are not implemented for this system. A custom
    ProposalEngine matches only two carbons; the geometry engine chooses
    positions for the rest.

    Arguments:
        storage_filename, OPTIONAL, string
            Default is "butane.nc". Storage must be provided in order to
            analyze testsystem acceptance rates.
        exen_pdb_filename, OPTIONAL, string
            Default is None. If not None, a pdbfile is written after every
            ExpandedEnsemble iteration.
        scheme, OPTIONAL, string
            Default is 'geometry-ncmc-geometry' (hybrid NCMC). Must be in
            ['geometry-ncmc-geometry','ncmc-geometry-ncmc','geometry-ncmc'].

    Only one environment ('vacuum') is currently implemented; however all
    samplers are saved in dictionaries for consistency with other testsystems.
    """
    def __init__(self, storage_filename="butane.nc", exen_pdb_filename=None, scheme='geometry-ncmc-geometry', options=None):
        """
        __init__(self, storage_filename="butane.nc", exen_pdb_filename=None, scheme='geometry-ncmc-geometry'):
        """
        from perses.rjmc.topology_proposal import ButaneProposalEngine
        # Must be set before the base constructor runs: NullTestSystem.__init__
        # reads NullProposal and mol_name to build the proposal engine.
        self.NullProposal = ButaneProposalEngine
        self.mol_name = 'butane'
        super(ButaneTestSystem, self).__init__(
            storage_filename=storage_filename,
            exen_pdb_filename=exen_pdb_filename,
            scheme=scheme,
            options=options)
class PropaneTestSystem(NullTestSystem):
    """
    Test turning Propane into Propane in vacuum.

    Only the ExpandedEnsemble sampler is exercised; SAMS and
    MultiTargetDesign are not implemented for this system. A custom
    ProposalEngine maps CH3-CH2 and has the geometry engine build in the
    other CH3.

    Arguments:
        storage_filename, OPTIONAL, string
            Default is "propane.nc". Storage must be provided in order to
            analyze testsystem acceptance rates.
        exen_pdb_filename, OPTIONAL, string
            Default is None. If not None, a pdbfile is written after every
            ExpandedEnsemble iteration.
        scheme, OPTIONAL, string
            Default is 'geometry-ncmc-geometry' (hybrid NCMC). Must be in
            ['geometry-ncmc-geometry','ncmc-geometry-ncmc','geometry-ncmc'].

    Only one environment ('vacuum') is currently implemented; however all
    samplers are saved in dictionaries for consistency with other testsystems.
    (The original docstring mislabeled the constructor as ButaneTestSystem.)
    """
    def __init__(self, storage_filename="propane.nc", exen_pdb_filename=None, scheme='geometry-ncmc-geometry', options=None):
        """
        __init__(self, storage_filename="propane.nc", exen_pdb_filename=None, scheme='geometry-ncmc-geometry'):
        """
        from perses.rjmc.topology_proposal import PropaneProposalEngine
        # Must be set before the base constructor runs: NullTestSystem.__init__
        # reads NullProposal and mol_name to build the proposal engine.
        self.NullProposal = PropaneProposalEngine
        self.mol_name = 'propane'
        super(PropaneTestSystem, self).__init__(
            storage_filename=storage_filename,
            exen_pdb_filename=exen_pdb_filename,
            scheme=scheme,
            options=options)
def run_null_system(testsystem):
    """
    Intended for use with NullTestSystem subclasses ONLY
    Runs TestSystem ExpandedEnsemble sampler ONLY

    Uses BAR to check whether the free energies of the two states
    (both naphthalene) are within 6 sigma of 0

    Imports netCDF4 to read in storage file and access data

    Arguments:
    ----------
    testsystem : NaphthaleneTestSystem, ButaneTestSystem, or PropaneTestSystem
        Only these three test systems have the proposal_engine._fake_states
        attribute, which differentiates between 2 states of a null proposal

    CURRENTLY:
    The expanded ensemble acceptance rate of naphthalene-A to naphthalene-B
    is very low. This test will run 10 iterations of the ExpandedEnsemble
    sampler until a switch is accepted, and then run approximately that
    number of steps again, to ensure w_f and w_r have nonzero length. This
    should not be necessary if the acceptance rate is higher, and the
    number of exen_sampler iterations can be fixed.

    TODO:
        move netcdf import to analysis for general use
        move BAR import to analysis, define use of BAR to be generalized
    """
    if not issubclass(type(testsystem), NullTestSystem):
        # Fixed typo in the error message ("ButantTestSystem" -> "ButaneTestSystem").
        raise NotImplementedError("run_null_system is only compatible with NaphthaleneTestSystem, ButaneTestSystem or PropaneTestSystem; given {0}".format(type(testsystem)))

    import netCDF4 as netcdf
    import pickle
    import codecs

    for key in testsystem.environments: # only one key: vacuum
        # run a single iteration to generate item in number_of_state_visits dict
        testsystem.exen_samplers[key].run(niterations=100)
        # until a switch is accepted, only the initial state will have an item
        # in the number_of_state_visits dict
        while len(testsystem.exen_samplers[key].number_of_state_visits.keys()) == 1:
            testsystem.exen_samplers[key].run(niterations=10)
        # after a switch has been accepted, run approximately the same number of
        # steps again, to end up with roughly equal number of proposals starting
        # from each state
        testsystem.exen_samplers[key].run(niterations=testsystem.exen_samplers[key].nrejected)
        print(testsystem.exen_samplers[key].number_of_state_visits)
        print("Acceptances in {0} iterations: {1}".format(testsystem.exen_samplers[key].iteration, testsystem.exen_samplers[key].naccepted))

    from perses.analysis import Analysis
    analysis = Analysis(testsystem.storage_filename)
    analysis.plot_exen_logp_components()

    # NOTE: `key` below is the loop variable leaked from the loop above; this
    # is only meaningful because a single environment is expected.
    ncfile = netcdf.Dataset(testsystem.storage_filename, 'r')
    ee_sam = ncfile.groups['ExpandedEnsembleSampler']
    niterations = ee_sam.variables['logp_accept'].shape[0]
    logps = np.zeros(niterations, np.float64)
    state_keys = list()
    for n in range(niterations):
        logps[n] = ee_sam.variables['logp_accept'][n]
        s_key = str(ee_sam.variables['proposed_state_key'][n])
        state_keys.append(pickle.loads(codecs.decode(s_key, "base64")))

    len_w_r = state_keys.count(testsystem.proposal_engines[key]._fake_states[0])
    len_w_f = state_keys.count(testsystem.proposal_engines[key]._fake_states[1])
    try:
        assert niterations == len_w_f + len_w_r
    except AssertionError:
        # Fixed: was a bare `except:`, which would also have swallowed
        # KeyboardInterrupt/SystemExit and unrelated failures.
        print("{0} iterations, but {1} started from A and {2} started from B?".format(niterations, len_w_f, len_w_r))

    if len_w_f == 0 or len_w_r == 0:
        # test failure, but what to do?
        raise Exception("Cannot run BAR because no transitions were made")

    # after importing all logps, use proposed_state_key to split them into
    # separate arrays depending on the direction of the proposed switch
    w_f = np.zeros(len_w_f, np.float64)
    w_r = np.zeros(len_w_r, np.float64)
    w_f_count = 0
    w_r_count = 0
    for n in range(niterations):
        if state_keys[n] == testsystem.proposal_engines[key]._fake_states[1]:
            w_f[w_f_count] = logps[n]
            w_f_count += 1
        else:
            w_r[w_r_count] = logps[n]
            w_r_count += 1

    from pymbar import BAR
    [df, ddf] = BAR(w_f, w_r, method='self-consistent-iteration')
    print('%8.3f +- %.3f kT' % (df, ddf))

    # Require the free-energy difference of the null transformation to be
    # statistically indistinguishable from zero.
    NSIGMA_MAX = 6.0
    if (abs(df) > NSIGMA_MAX * ddf):
        msg = 'Delta F (%d proposals) = %f +- %f kT; should be within %f sigma of 0' % (niterations, df, ddf, NSIGMA_MAX)
        msg += '\n'
        msg += 'w_f = %s\n' % str(w_f)
        msg += 'w_r = %s\n' % str(w_r)
        raise Exception(msg)
def check_topologies(testsystem):
    """
    Check that all SystemGenerators can build systems for their corresponding Topology objects.
    """
    for env in testsystem.environments:
        topology = testsystem.topologies[env]
        try:
            testsystem.system_generators[env].build_system(topology)
        except Exception as err:
            # Show the offending topology before failing so the problem is
            # inspectable, then re-raise with context.
            from perses.utils.smallmolecules import show_topology
            show_topology(topology)
            failure = "%s\ntopology for environment '%s' cannot be built into a system" % (str(err), env)
            raise Exception(failure)
def checktestsystem(testsystem_class):
    """Instantiate `testsystem_class` against a throwaway storage file and verify its topologies build."""
    # Back the test system with a temporary NetCDF storage file; the file is
    # cleaned up when the NamedTemporaryFile object is garbage collected.
    tmpfile = tempfile.NamedTemporaryFile()
    instance = testsystem_class(storage_filename=tmpfile.name)
    # Every environment's Topology must be buildable into a System.
    check_topologies(instance)
def test_testsystems():
    """
    Test instantiation of all test systems.

    Nose-style generator: yields one check callable per test system class.
    """
    import perses.tests.testsystems
    names = ['KinaseInhibitorsTestSystem', 'T4LysozymeInhibitorsTestSystem', 'AlkanesTestSystem', 'AlanineDipeptideTestSystem']
    for testsystem_name in names:
        check = partial(checktestsystem, getattr(perses.tests.testsystems, testsystem_name))
        check.description = "Testing %s" % (testsystem_name)
        yield check
def run_t4_inhibitors():
    """
    Run T4 lysozyme inhibitors in solvents test system.
    """
    testsystem = T4LysozymeInhibitorsTestSystem(storage_filename='output.nc', ncmc_nsteps=5000, mcmc_nsteps=100)
    # Enable verbose output on the per-environment samplers.
    for env in ('explicit', 'vacuum'):
        #testsystem.exen_samplers[environment].pdbfile = open('t4-' + component + '.pdb', 'w')
        #testsystem.exen_samplers[environment].options={'nsteps':50} # instantaneous MC
        testsystem.exen_samplers[env].verbose = True
        testsystem.sams_samplers[env].verbose = True
    testsystem.designer.verbose = True
    testsystem.sams_samplers['explicit'].run(niterations=50)
    # Analyze data.
    #from perses.analysis import Analysis
    #analysis = Analysis(storage_filename='output.nc')
    #analysis.plot_sams_weights('sams.pdf')
    #analysis.plot_ncmc_work('ncmc.pdf')
def run_alkanes():
    """
    Run alkanes in solvents test system.
    """
    testsystem = AlkanesTestSystem(storage_filename='output.nc', ncmc_nsteps=5000, mcmc_nsteps=100)
    # Enable verbose output on the per-environment samplers.
    for env in ('explicit', 'vacuum'):
        #testsystem.exen_samplers[environment].pdbfile = open('t4-' + component + '.pdb', 'w')
        #testsystem.exen_samplers[environment].options={'nsteps':50} # instantaneous MC
        testsystem.exen_samplers[env].verbose = True
        testsystem.sams_samplers[env].verbose = True
    testsystem.designer.verbose = True
    testsystem.sams_samplers['explicit'].run(niterations=50)
def run_t4():
    """
    Run T4 lysozyme test system.
    """
    testsystem = T4LysozymeTestSystem(ncmc_nsteps=0)
    solvent = 'explicit'
    for component in ['complex', 'receptor']:
        env = solvent + '-' + component
        testsystem.exen_samplers[env].pdbfile = open('t4-' + component + '.pdb', 'w')
        testsystem.sams_samplers[env].run(niterations=5)
    testsystem.designer.verbose = True
    testsystem.designer.run(niterations=5)
    # Analyze data.
    #from perses.analysis import Analysis
    #analysis = Analysis(storage_filename='output.nc')
    #analysis.plot_sams_weights('sams.pdf')
    #analysis.plot_ncmc_work('ncmc.pdf')
def run_myb():
    """
    Run myb test system.
    """
    testsystem = MybTestSystem(ncmc_nsteps=0, mcmc_nsteps=100)
    solvent = 'implicit'
    peptide_env = solvent + '-peptide'
    complex_env = solvent + '-complex'
    testsystem.exen_samplers[peptide_env].pdbfile = open('myb-vacuum.pdb', 'w')
    testsystem.exen_samplers[complex_env].pdbfile = open('myb-complex.pdb', 'w')
    testsystem.sams_samplers[complex_env].run(niterations=5)
    #testsystem.designer.verbose = True
    #testsystem.designer.run(niterations=500)
    #testsystem.exen_samplers[solvent + '-peptide'].verbose=True
    #testsystem.exen_samplers[solvent + '-peptide'].run(niterations=100)
def run_abl_imatinib_resistance():
    """
    Run abl test system.
    """
    testsystem = AblImatinibResistanceTestSystem(ncmc_nsteps=20000, mcmc_nsteps=20000)
    #for environment in testsystem.environments:
    for env in ['vacuum-complex']:
        print(env)
        # Record trajectory and geometry proposals for inspection.
        testsystem.exen_samplers[env].pdbfile = open('abl-imatinib-%s.pdb' % env, 'w')
        testsystem.exen_samplers[env].geometry_pdbfile = open('abl-imatinib-%s-geometry-proposals.pdb' % env, 'w')
        #testsystem.mcmc_samplers[environment].run(niterations=5)
        testsystem.exen_samplers[env].run(niterations=100)
        #testsystem.sams_samplers[environment].run(niterations=5)
    #testsystem.designer.verbose = True
    #testsystem.designer.run(niterations=500)
    #testsystem.exen_samplers[solvent + '-peptide'].verbose=True
    #testsystem.exen_samplers[solvent + '-peptide'].run(niterations=100)
def run_kinase_inhibitors():
    """
    Run kinase inhibitors test system.
    """
    # Load a pre-computed atom-map JSON so the proposal engine can skip the
    # expensive mapping step.
    with open("mapperkinase3.json", 'r') as jsoninput:
        json_dict = jsoninput.read()
    testsystem = KinaseInhibitorsTestSystem(ncmc_nsteps=100, mcmc_nsteps=10, premapped_json_dict=json_dict, constraints=None)
    env = 'vacuum'
    testsystem.exen_samplers[env].pdbfile = open('kinase-inhibitors-vacuum.pdb', 'w')
    testsystem.exen_samplers[env].geometry_pdbfile = open('kinase-inhibitors-%s-geometry-proposals.pdb' % env, 'w')
    testsystem.exen_samplers[env].geometry_engine.write_proposal_pdb = True # write proposal PDBs
    testsystem.exen_samplers[env].geometry_engine.verbose = True
    testsystem.sams_samplers[env].run(niterations=100)
def run_valence_system():
    """
    Run valence molecules test system.

    This system only has one environment (vacuum), so SAMS is used.
    """
    testsystem = ValenceSmallMoleculeLibraryTestSystem(storage_filename='output.nc', ncmc_nsteps=0, mcmc_nsteps=10)
    env = 'vacuum'
    testsystem.exen_samplers[env].pdbfile = open('valence.pdb', 'w')
    testsystem.sams_samplers[env].run(niterations=50)
def run_alanine_system(sterics=False):
    """
    Run alanine dipeptide in vacuum test system.

    If `sterics == True`, then sterics will be included.
    Otherwise, only valence terms are used.
    """
    testsystem_class = AlanineDipeptideTestSystem if sterics else AlanineDipeptideValenceTestSystem
    testsystem = testsystem_class(storage_filename='output.nc', ncmc_nsteps=0, mcmc_nsteps=100)
    environment = 'vacuum'
    print(testsystem.__class__.__name__)
    testsystem.exen_samplers[environment].pdbfile = open('valence.pdb', 'w')
    # Two-stage SAMS: switch gain schedule at iteration 100.
    sams = testsystem.sams_samplers[environment]
    sams.update_method = 'two-stage'
    sams.second_stage_start = 100 # iteration to start second stage
    sams.run(niterations=200)
def test_valence_write_pdb_ncmc_switching():
    """
    Run one ExpandedEnsemble iteration of the valence-only small-molecule
    test system with a short NCMC switch.
    (The previous docstring incorrectly said "Run abl test system".)
    """
    testsystem = ValenceSmallMoleculeLibraryTestSystem(ncmc_nsteps=10, mcmc_nsteps=10)
    environment = 'vacuum'
    testsystem.exen_samplers[environment].run(niterations=1)
def run_abl_affinity_write_pdb_ncmc_switching():
    """
    Run abl test system.
    """
    testsystem = AblAffinityTestSystem(ncmc_nsteps=10000, mcmc_nsteps=10000)
    #for environment in testsystem.environments:
    for env in ['vacuum-complex']:
        print(env)
        # Record trajectory and geometry proposals for inspection.
        testsystem.exen_samplers[env].pdbfile = open('abl-imatinib-%s.pdb' % env, 'w')
        testsystem.exen_samplers[env].geometry_pdbfile = open('abl-imatinib-%s-geometry-proposals.pdb' % env, 'w')
        testsystem.exen_samplers[env].verbose = True
        testsystem.sams_samplers[env].verbose = True
        #testsystem.mcmc_samplers[environment].run(niterations=5)
        testsystem.exen_samplers[env].run(niterations=5)
        #testsystem.sams_samplers[environment].run(niterations=5)
    #testsystem.designer.verbose = True
    #testsystem.designer.run(niterations=500)
    #testsystem.exen_samplers[solvent + '-peptide'].verbose=True
    #testsystem.exen_samplers[solvent + '-peptide'].run(niterations=100)
def run_constph_abl():
    """
    Run Abl:imatinib constant-pH test system.
    """
    testsystem = AblImatinibProtonationStateTestSystem(ncmc_nsteps=50, mcmc_nsteps=2500)
    for env in testsystem.environments:
        #for environment in ['explicit-inhibitor', 'explicit-complex']:
        #for environment in ['vacuum-inhibitor', 'vacuum-complex']:
        if env not in testsystem.exen_samplers:
            print("Skipping '%s' for now..." % env)
            continue
        print(env)
        # Record trajectory and geometry proposals, and turn on verbosity.
        testsystem.exen_samplers[env].pdbfile = open('abl-imatinib-constph-%s.pdb' % env, 'w')
        testsystem.exen_samplers[env].geometry_pdbfile = open('abl-imatinib-constph-%s-geometry-proposals.pdb' % env, 'w')
        testsystem.exen_samplers[env].verbose = True
        testsystem.exen_samplers[env].proposal_engine.verbose = True
        testsystem.sams_samplers[env].verbose = True
        #testsystem.mcmc_samplers[environment].run(niterations=5)
        #testsystem.exen_samplers[environment].run(niterations=5)
        #testsystem.sams_samplers[environment].run(niterations=5)
    # Run ligand in solvent constant-pH sampler calibration
    testsystem.sams_samplers['explicit-inhibitor'].verbose = True
    testsystem.sams_samplers['explicit-inhibitor'].run(niterations=100)
    #testsystem.exen_samplers['vacuum-inhibitor'].verbose=True
    #testsystem.exen_samplers['vacuum-inhibitor'].run(niterations=100)
    #testsystem.exen_samplers['explicit-complex'].verbose=True
    #testsystem.exen_samplers['explicit-complex'].run(niterations=100)
    # Run constant-pH sampler
    testsystem.designer.verbose = True
    testsystem.designer.update_target_probabilities() # update log weights from inhibitor in solvent calibration
    testsystem.designer.run(niterations=500)
def run_imidazole():
    """
    Run imidazole constant-pH test system.
    """
    testsystem = ImidazoleProtonationStateTestSystem(storage_filename='output.nc', ncmc_nsteps=500, mcmc_nsteps=1000)
    for env in testsystem.environments:
        if env not in testsystem.exen_samplers:
            print("Skipping '%s' for now..." % env)
            continue
        print(env)
        #testsystem.exen_samplers[environment].pdbfile = open('imidazole-constph-%s.pdb' % environment, 'w')
        #testsystem.exen_samplers[environment].geometry_pdbfile = open('imidazole-constph-%s-geometry-proposals.pdb' % environment, 'w')
        testsystem.exen_samplers[env].verbose = True
        testsystem.exen_samplers[env].proposal_engine.verbose = True
        testsystem.sams_samplers[env].verbose = True
    # Run ligand in solvent constant-pH sampler calibration
    testsystem.sams_samplers['explicit-imidazole'].verbose = True
    testsystem.sams_samplers['explicit-imidazole'].run(niterations=100)
def run_fused_rings():
    """Run the fused-rings test system, varying the number of NCMC steps.

    For each NCMC step count, runs the multi-target designer and writes a
    per-run storage file plus an NCMC work plot.
    """
    #nsteps_to_try = [1, 10, 100, 1000, 10000, 100000] # number of NCMC steps
    nsteps_to_try = [10, 100, 1000, 10000, 100000] # number of NCMC steps
    for ncmc_steps in nsteps_to_try:
        storage_filename = 'output-%d.nc' % ncmc_steps
        # BUGFIX: pass the current step count (ncmc_steps), not the whole
        # nsteps_to_try list, as ncmc_nsteps — previously every iteration ran
        # with the same (invalid) list argument instead of the varied value.
        testsystem = FusedRingsTestSystem(storage_filename=storage_filename, ncmc_nsteps=ncmc_steps, mcmc_nsteps=100)
        for environment in ['explicit', 'vacuum']:
            testsystem.exen_samplers[environment].ncmc_engine.verbose = True # verbose output of work
            testsystem.sams_samplers[environment].verbose = True
        testsystem.designer.verbose = True
        testsystem.designer.run(niterations=100)
        # Analyze data.
        from perses.analysis import Analysis
        analysis = Analysis(storage_filename=storage_filename)
        #analysis.plot_sams_weights('sams.pdf')
        analysis.plot_ncmc_work('ncmc-%d.pdf' % ncmc_steps)
if __name__ == '__main__':
    # Entry point: exactly one driver is enabled at a time; the others are
    # kept commented out as a menu of alternative test systems to run.
    #testsystem = PropaneTestSystem(scheme='geometry-ncmc-geometry', options = {'nsteps':10})
    #run_null_system(testsystem)
    #run_alanine_system(sterics=False)
    #run_fused_rings()
    #run_valence_system()
    run_alkanes()
    #run_imidazole()
    #run_constph_abl()
    #run_abl_affinity_write_pdb_ncmc_switching()
    #run_kinase_inhibitors()
    #run_abl_imatinib()
    #run_myb()
| 49.202024
| 289
| 0.688918
| 13,202
| 131,271
| 6.697773
| 0.065596
| 0.027323
| 0.015607
| 0.010518
| 0.794083
| 0.768818
| 0.748519
| 0.735615
| 0.727031
| 0.712454
| 0
| 0.007266
| 0.222022
| 131,271
| 2,667
| 290
| 49.220472
| 0.858567
| 0.278119
| 0
| 0.678792
| 0
| 0.002059
| 0.080395
| 0.012328
| 0
| 0
| 0
| 0.00375
| 0.000686
| 1
| 0.027454
| false
| 0.000686
| 0.073439
| 0
| 0.115992
| 0.024022
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7ec49147eedb2ad149fd07ccdbfa7bc6e453b6ed
| 1,005
|
py
|
Python
|
channels/snap.py
|
Dinxor/tstore
|
ff2bb229ad2169926046076022b5a37025e98877
|
[
"MIT"
] | null | null | null |
channels/snap.py
|
Dinxor/tstore
|
ff2bb229ad2169926046076022b5a37025e98877
|
[
"MIT"
] | null | null | null |
channels/snap.py
|
Dinxor/tstore
|
ff2bb229ad2169926046076022b5a37025e98877
|
[
"MIT"
] | null | null | null |
import snap7
import struct
def get_float(plc_addr, plc_area, target, label):
    """Read an area of 32-bit big-endian floats from a Siemens PLC via snap7
    and enqueue the rounded values.

    plc_addr : tuple unpacked into snap7 Client.connect (host/rack/slot —
        TODO confirm exact shape against callers).
    plc_area : tuple unpacked into Client.read_area.
    target : queue-like object; receives ``[label, values]`` via a blocking put.
    label : tag identifying this read in the queue message.

    Returns 0 on success, 1 on any failure (connection, read, or decode).
    """
    try:
        plc = snap7.client.Client()
        plc.connect(*plc_addr)
        raw = plc.read_area(*plc_area)
        plc.disconnect()
        values = []
        for i in range(0, len(raw), 4):
            # '>f' = big-endian IEEE-754 single precision; replaces the manual
            # int.from_bytes -> pack('I') -> unpack('f') round-trip, which
            # decoded the same big-endian float in three steps.
            (value,) = struct.unpack('>f', bytes(raw[i:i + 4]))
            values.append(round(value, 2))
        target.put([label, values], block=True)
    except Exception:
        # Keep the original error-code contract, but use `except Exception`
        # so KeyboardInterrupt/SystemExit are no longer swallowed by a bare
        # except clause.
        return 1
    return 0
def get_int(plc_addr, plc_area, target, label):
    """Read an area of 16-bit big-endian integers from a Siemens PLC via
    snap7 and enqueue them.

    plc_addr : tuple unpacked into snap7 Client.connect.
    plc_area : tuple unpacked into Client.read_area.
    target : queue-like object; receives ``[label, values]`` via a blocking put.
    label : tag identifying this read in the queue message.

    Returns 0 on success, 1 on any failure.
    """
    try:
        plc = snap7.client.Client()
        plc.connect(*plc_addr)
        raw = plc.read_area(*plc_area)
        plc.disconnect()
        # Decode consecutive 2-byte words directly from the buffer slice;
        # the previous per-byte list comprehension was equivalent but slower.
        values = [
            int.from_bytes(raw[i:i + 2], byteorder='big')
            for i in range(0, len(raw), 2)
        ]
        target.put([label, values], block=True)
    except Exception:
        # Preserve the error-code contract while avoiding a bare except
        # that would also trap KeyboardInterrupt/SystemExit.
        return 1
    return 0
| 28.714286
| 84
| 0.551244
| 143
| 1,005
| 3.776224
| 0.307692
| 0.051852
| 0.037037
| 0.051852
| 0.759259
| 0.759259
| 0.759259
| 0.759259
| 0.759259
| 0.759259
| 0
| 0.021552
| 0.307463
| 1,005
| 34
| 85
| 29.558824
| 0.75431
| 0
| 0
| 0.645161
| 0
| 0
| 0.00796
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.064516
| 0
| 0.258065
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7ee1b5ed3ae06d921bbaa2ab48c094a5775f5493
| 223
|
py
|
Python
|
objectModel/Python/tests/__init__.py
|
aaron-emde/CDM
|
9472e9c7694821ac4a9bbe608557d2e65aabc73e
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
objectModel/Python/tests/__init__.py
|
aaron-emde/CDM
|
9472e9c7694821ac4a9bbe608557d2e65aabc73e
|
[
"CC-BY-4.0",
"MIT"
] | 3
|
2021-05-11T23:57:12.000Z
|
2021-08-04T05:03:05.000Z
|
objectModel/Python/tests/__init__.py
|
aaron-emde/CDM
|
9472e9c7694821ac4a9bbe608557d2e65aabc73e
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
#------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation.
# All rights reserved.
#------------------------------------------------------------------------------
| 44.6
| 80
| 0.210762
| 8
| 223
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.049327
| 223
| 4
| 81
| 55.75
| 0.221698
| 0.959641
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 0
| null | null | 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7d0f821edc05a12e8194dd420080d585f1267156
| 45
|
py
|
Python
|
makahiki/apps/managers/player_mgr/management/commands/__init__.py
|
justinslee/Wai-Not-Makahiki
|
4b7dd685012ec64758affe0ecee3103596d16aa7
|
[
"MIT"
] | 1
|
2015-07-22T11:31:20.000Z
|
2015-07-22T11:31:20.000Z
|
makahiki/apps/widgets/status/management/commands/__init__.py
|
justinslee/Wai-Not-Makahiki
|
4b7dd685012ec64758affe0ecee3103596d16aa7
|
[
"MIT"
] | null | null | null |
makahiki/apps/widgets/status/management/commands/__init__.py
|
justinslee/Wai-Not-Makahiki
|
4b7dd685012ec64758affe0ecee3103596d16aa7
|
[
"MIT"
] | null | null | null |
"""Implements player management commands."""
| 22.5
| 44
| 0.755556
| 4
| 45
| 8.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 45
| 1
| 45
| 45
| 0.829268
| 0.844444
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7d1620aaf2c20d65a4b968ac23a191743608c01d
| 165
|
py
|
Python
|
micro21cm/__init__.py
|
mirochaj/micro21cm
|
d5a576718967a4c82fcb2f23b03696c2ead75de3
|
[
"MIT"
] | 4
|
2022-01-17T17:23:13.000Z
|
2022-02-06T18:44:19.000Z
|
micro21cm/__init__.py
|
mirochaj/micro21cm
|
d5a576718967a4c82fcb2f23b03696c2ead75de3
|
[
"MIT"
] | 7
|
2021-12-06T21:50:35.000Z
|
2022-01-23T19:40:39.000Z
|
micro21cm/__init__.py
|
mirochaj/micro21cm
|
d5a576718967a4c82fcb2f23b03696c2ead75de3
|
[
"MIT"
] | null | null | null |
from .box import Box
from .models import BubbleModel
from .analysis import AnalyzeFit
from .inference import FitHelper
from .util import labels, get_cmd_line_kwargs
| 27.5
| 45
| 0.836364
| 24
| 165
| 5.625
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127273
| 165
| 5
| 46
| 33
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7d2c90c22b4c4d0bf9e1e5797f1655731a7db682
| 5,165
|
py
|
Python
|
src/indra_cogex/client/enrichment/utils.py
|
bgyori/indra_cogex
|
04a72d7941d4acd31ebfe73568114415d43394ea
|
[
"BSD-2-Clause"
] | 2
|
2021-05-27T02:44:09.000Z
|
2022-01-12T21:34:07.000Z
|
src/indra_cogex/client/enrichment/utils.py
|
bgyori/indra_cogex
|
04a72d7941d4acd31ebfe73568114415d43394ea
|
[
"BSD-2-Clause"
] | 33
|
2021-08-29T18:23:26.000Z
|
2022-03-29T21:56:08.000Z
|
src/indra_cogex/client/enrichment/utils.py
|
bgyori/indra_cogex
|
04a72d7941d4acd31ebfe73568114415d43394ea
|
[
"BSD-2-Clause"
] | 5
|
2021-06-15T09:01:23.000Z
|
2022-03-13T14:26:09.000Z
|
# -*- coding: utf-8 -*-
"""Utilities for getting gene sets."""
from collections import defaultdict
from functools import lru_cache
from textwrap import dedent
from indra_cogex.client.neo4j_client import Neo4jClient
# Explicit public API of this module (names exported by `from ... import *`).
__all__ = [
    "collect_gene_sets",
    "get_go",
    "get_wikipathways",
    "get_reactome",
    "get_entity_to_targets",
    "get_entity_to_regulators",
]
def collect_gene_sets(
    client: Neo4jClient, query: str
) -> dict[tuple[str, str], set[str]]:
    """Run *query* and aggregate its rows into gene sets.

    Parameters
    ----------
    client :
        The Neo4j client.
    query:
        A cypher query

    Returns
    -------
    :
        A dictionary mapping (CURIE, name) 2-tuples of each queried item to
        the set of HGNC gene identifiers (as strings) collected for it.
    """
    gene_sets = defaultdict(set)
    for row in client.query_tx(query):
        # Row layout: [0] = CURIE, [1] = name, [2] = list of HGNC CURIEs.
        key = (row[0], row[1])
        gene_sets[key].update(
            hgnc_curie.lower().removeprefix("hgnc:") for hgnc_curie in row[2]
        )
    return dict(gene_sets)
@lru_cache(maxsize=1)
def get_go(client: Neo4jClient) -> dict[tuple[str, str], set[str]]:
    """Return GO term gene sets.

    Parameters
    ----------
    client :
        The Neo4j client.

    Returns
    -------
    :
        A dictionary mapping (CURIE, name) 2-tuples of each GO term to the
        set of HGNC gene identifiers (as strings) associated with it.
    """
    cypher = dedent(
        """\
        MATCH (gene:BioEntity)-[:associated_with]->(term:BioEntity)
        RETURN term.id, term.name, collect(gene.id) as gene_curies;
    """
    )
    return collect_gene_sets(client, cypher)
@lru_cache(maxsize=1)
def get_wikipathways(client: Neo4jClient) -> dict[tuple[str, str], set[str]]:
    """Return WikiPathways pathway gene sets.

    Parameters
    ----------
    client :
        The Neo4j client.

    Returns
    -------
    :
        A dictionary mapping (CURIE, name) 2-tuples of each WikiPathways
        pathway to the set of HGNC gene identifiers (as strings) it contains.
    """
    cypher = dedent(
        """\
        MATCH (pathway:BioEntity)-[:haspart]->(gene:BioEntity)
        WHERE pathway.id STARTS WITH "wikipathways" and gene.id STARTS WITH "hgnc"
        RETURN pathway.id, pathway.name, collect(gene.id);
    """
    )
    return collect_gene_sets(client, cypher)
@lru_cache(maxsize=1)
def get_reactome(client: Neo4jClient) -> dict[tuple[str, str], set[str]]:
    """Return Reactome pathway gene sets.

    NOTE(review): this query's relationship pattern is undirected
    (``-[:haspart]-``) while the WikiPathways query is directed — confirm
    whether that asymmetry is intentional.

    Parameters
    ----------
    client :
        The Neo4j client.

    Returns
    -------
    :
        A dictionary mapping (CURIE, name) 2-tuples of each Reactome pathway
        to the set of HGNC gene identifiers (as strings) it contains.
    """
    cypher = dedent(
        """\
        MATCH (pathway:BioEntity)-[:haspart]-(gene:BioEntity)
        WHERE pathway.id STARTS WITH "reactome" and gene.id STARTS WITH "hgnc"
        RETURN pathway.id, pathway.name, collect(gene.id);
    """
    )
    return collect_gene_sets(client, cypher)
@lru_cache(maxsize=1)
def get_entity_to_targets(client: Neo4jClient) -> dict[tuple[str, str], set[str]]:
    """Map each regulator entity in the INDRA database to the set of human
    genes that it regulates.

    Parameters
    ----------
    client :
        The Neo4j client.

    Returns
    -------
    :
        A dictionary mapping (CURIE, name) 2-tuples of each regulator entity
        to the set of HGNC gene identifiers (as strings) it targets.
    """
    cypher = dedent(
        """\
        MATCH (regulator:BioEntity)-[r:indra_rel]->(gene:BioEntity)
        // Collecting human genes only
        WHERE gene.id STARTS WITH "hgnc"
        // Ignore complexes since they are non-directional
        AND r.stmt_type <> "Complex"
        // This is a simple way to ignore non-human proteins
        AND NOT regulator.id STARTS WITH "uniprot"
        RETURN regulator.id, regulator.name, collect(gene.id);
    """
    )
    return collect_gene_sets(client, cypher)
@lru_cache(maxsize=1)
def get_entity_to_regulators(client: Neo4jClient) -> dict[tuple[str, str], set[str]]:
    """Get a mapping from each entity in the INDRA database to the set of
    human genes that are causally upstream of it.

    Parameters
    ----------
    client :
        The Neo4j client.

    Returns
    -------
    :
        A dictionary whose keys that are 2-tuples of CURIE and name of each entity
        and whose values are sets of HGNC gene identifiers (as strings)
    """
    # BUGFIX: the WHERE clause referenced `regulator.id`, but this MATCH binds
    # only `gene` and `target` (the variable name was copied from
    # get_entity_to_targets); Neo4j rejects the undefined variable. The
    # non-human-protein filter applies to the entity, which is `target` here.
    query = dedent(
        """\
        MATCH (gene:BioEntity)-[r:indra_rel]->(target:BioEntity)
        // Collecting human genes only
        WHERE gene.id STARTS WITH "hgnc"
        // Ignore complexes since they are non-directional
        AND r.stmt_type <> "Complex"
        // This is a simple way to ignore non-human proteins
        AND NOT target.id STARTS WITH "uniprot"
        RETURN target.id, target.name, collect(gene.id);
    """
    )
    return collect_gene_sets(client, query)
| 28.070652
| 87
| 0.626137
| 667
| 5,165
| 4.752624
| 0.176912
| 0.04511
| 0.037855
| 0.039748
| 0.729968
| 0.720505
| 0.70694
| 0.70694
| 0.70694
| 0.668139
| 0
| 0.007614
| 0.262536
| 5,165
| 183
| 88
| 28.224044
| 0.824626
| 0.339013
| 0
| 0.306122
| 0
| 0
| 0.059905
| 0.02669
| 0
| 0
| 0
| 0
| 0
| 1
| 0.122449
| false
| 0
| 0.081633
| 0
| 0.326531
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7d4f8733cde041f966feb0f7d3ce3b731986a2f8
| 49
|
py
|
Python
|
algorithms/__init__.py
|
lisenbuaa/RRL
|
5db0d753486de1518af1077c6ef2121da41486f8
|
[
"BSD-3-Clause"
] | 4
|
2021-11-26T09:15:00.000Z
|
2022-01-11T06:29:57.000Z
|
algorithms/__init__.py
|
lisenbuaa/RRL
|
5db0d753486de1518af1077c6ef2121da41486f8
|
[
"BSD-3-Clause"
] | null | null | null |
algorithms/__init__.py
|
lisenbuaa/RRL
|
5db0d753486de1518af1077c6ef2121da41486f8
|
[
"BSD-3-Clause"
] | 3
|
2021-12-15T16:12:44.000Z
|
2022-03-14T01:31:58.000Z
|
from .Algorithm import *
from .Model import Model
| 24.5
| 24
| 0.795918
| 7
| 49
| 5.571429
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 49
| 2
| 25
| 24.5
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
adc967ff6314b7b59f7ef4293ea6b95c84584403
| 59
|
py
|
Python
|
My_work/appengine/index_redirect.py
|
Serag8/Bachelor
|
097c0ad2264e9c8790afcdbafa8e7fe8f46410a3
|
[
"MIT"
] | null | null | null |
My_work/appengine/index_redirect.py
|
Serag8/Bachelor
|
097c0ad2264e9c8790afcdbafa8e7fe8f46410a3
|
[
"MIT"
] | null | null | null |
My_work/appengine/index_redirect.py
|
Serag8/Bachelor
|
097c0ad2264e9c8790afcdbafa8e7fe8f46410a3
|
[
"MIT"
] | null | null | null |
# CGI-style redirect: emit a 302 status and the Location header on stdout.
for header_line in ("Status: 302", "Location: /static/index.html"):
    print(header_line)
| 19.666667
| 37
| 0.711864
| 8
| 59
| 5.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054545
| 0.067797
| 59
| 2
| 38
| 29.5
| 0.709091
| 0
| 0
| 0
| 0
| 0
| 0.661017
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
add293a56de64db1985341d6e85d448828791f91
| 49
|
py
|
Python
|
juicy-jaguars/Web95/Web95/__init__.py
|
Vthechamp22/summer-code-jam-2021
|
0a8bf1f22f6c73300891fd779da36efd8e1304c1
|
[
"MIT"
] | 40
|
2020-08-02T07:38:22.000Z
|
2021-07-26T01:46:50.000Z
|
juicy-jaguars/Web95/Web95/__init__.py
|
Vthechamp22/summer-code-jam-2021
|
0a8bf1f22f6c73300891fd779da36efd8e1304c1
|
[
"MIT"
] | 134
|
2020-07-31T12:15:45.000Z
|
2020-12-13T04:42:19.000Z
|
juicy-jaguars/Web95/Web95/__init__.py
|
Vthechamp22/summer-code-jam-2021
|
0a8bf1f22f6c73300891fd779da36efd8e1304c1
|
[
"MIT"
] | 101
|
2020-07-31T12:00:47.000Z
|
2021-11-01T09:06:58.000Z
|
"""Make python think this folder is a module."""
| 24.5
| 48
| 0.693878
| 8
| 49
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163265
| 49
| 1
| 49
| 49
| 0.829268
| 0.857143
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
ade8a31d2537f64ec46dbadca93e6e53aec847b0
| 17,656
|
py
|
Python
|
ziloreapi/api.py
|
clyang/python-zilore
|
cb646fffb524a8eeeaca2abfeb8f574398efd8f7
|
[
"MIT"
] | 15
|
2019-09-17T02:14:32.000Z
|
2019-12-10T14:15:54.000Z
|
ziloreapi/api.py
|
clyang/python-zilore
|
cb646fffb524a8eeeaca2abfeb8f574398efd8f7
|
[
"MIT"
] | null | null | null |
ziloreapi/api.py
|
clyang/python-zilore
|
cb646fffb524a8eeeaca2abfeb8f574398efd8f7
|
[
"MIT"
] | 2
|
2019-12-10T14:16:00.000Z
|
2020-11-12T10:00:27.000Z
|
import requests
import logging
logger = logging.getLogger(__name__)
class Api(object):
def __init__(self, x_auth_key):
self._http_header = {'X-Auth-Key': x_auth_key}
self._urlbase = 'https://api.zilore.com/dns/v1/{0}?{0}'.format('{}')
def _do_request(self, function, params='', method='get'):
method = method.upper()
logger.debug("Performing request using method {}".format(method))
response = self._do_raw_request(function, params, method)
logger.debug("Response: %s", format(response))
return response.json()
def _do_raw_request(self, function, params='', method='get'):
if method == 'GET':
return requests.get(self._urlbase.format(function, params), headers=self._http_header)
elif method == 'POST':
return requests.post(self._urlbase.format(function, params), headers=self._http_header)
elif method == 'DELETE':
return requests.delete(self._urlbase.format(function, params), headers=self._http_header)
elif method == 'PUT':
return requests.put(self._urlbase.format(function, params), headers=self._http_header)
def test_login(self):
response = self._do_raw_request('domains')
result = response.json()
if 'status' in result and result['status'] == 'ok':
return True
else:
return False
def list_domains(self, offset=0, limit=1000, order_by='', order_param='', search_text=''):
params = 'offset={}&limit={}&order_by={}&order_param={}&search_text={}'.format(offset, limit, order_by, order_param, search_text)
return self._do_request('domains', params)
def list_tlds(self, version='', tlds=''):
params = 'version={}&tlds={}'.format(version, tlds)
return self._do_request('tlds', params)
def list_nameservers(self, domain_name=''):
return self._do_request('domains/{}/nameservers'.format(domain_name))
def list_statistics(self, domain_name='', period=''):
params = 'period={}'.format(period)
return self._do_request('domains/{}/statistics'.format(domain_name))
def add_domain(self, domain_name=[]):
params = 'domain_name={}'.format(','.join(domain_name))
return self._do_request('domains', params, 'post')
def delete_domain(self, domain_id=[], domain_name=[]):
if isinstance(domain_id, int):
domain_id = [domain_id]
params = 'domain_id={}&domain_name={}'.format(','.join(str(x) for x in domain_id), ','.join(domain_name))
return self._do_request('domains', params, 'delete')
def list_records(self, domain_name='', offset=0, limit=10000, order_by='', order_param='', search_text=''):
params = 'offset={}&limit={}&order_by={}&order_param={}&search_text={}'.format(offset, limit, order_by, order_param, search_text)
return self._do_request('domains/{}/records'.format(domain_name), params)
def list_valid_record_ttl(self):
return self._do_request('settings/ttl')
def add_record(self, domain_name='', record_type='', record_ttl=600, record_name='', record_value=''):
record_type = record_type.upper()
if not record_name.endswith(domain_name):
record_name = '{}.{}'.format(record_name, domain_name)
if record_type == 'TXT':
record_value = '"{}"'.format(record_value)
params = 'record_type={}&record_ttl={}&record_name={}&record_value={}'.format(record_type, record_ttl, record_name, record_value)
return self._do_request('domains/{}/records'.format(domain_name), params, 'post')
def update_record(self, domain_name='', record_id=None, record_type='', record_ttl=600, record_name='', record_value=''):
record_type = record_type.upper()
if record_name != '' and not record_name.endswith(domain_name):
record_name = '{}.{}'.format(record_name, domain_name)
if record_type == 'TXT':
record_value = '"{}"'.format(record_value)
args = locals()
params = ''
for k, v in args.items():
if k in ['self', 'domain_name', 'record_id']:
continue
if v != '' and v is not None:
params += '&{}={}'.format(k, v)
return self._do_request('domains/{}/records/{}'.format(domain_name, record_id), params, 'put')
def update_record_status(self, domain_name='', record_id=None, record_status=None):
params = 'record_status={}'.format(record_status)
return self._do_request('domains/{}/records/{}/status'.format(domain_name,record_id), params, 'put')
def delete_record(self, domain_name='', record_id=[]):
if isinstance(record_id, int):
record_id = [record_id]
params = 'record_id={}'.format(','.join(str(x) for x in record_id))
return self._do_request('domains/{}/records'.format(domain_name), params, 'delete')
def list_snapshots(self, domain_name=''):
return self._do_request('domains/{}/snapshots'.format(domain_name), '', 'get')
def list_snapshots_records(self, domain_name='', snapshot_id=''):
return self._do_request('domains/{}/snapshots/{}/records'.format(domain_name, snapshot_id), '', 'get')
def restore_snapshot(self, domain_name='', snapshot_id=''):
return self._do_request('domains/{}/snapshots/{}/restore'.format(domain_name, snapshot_id), '', 'post')
def geo_records(self, domain_name=''):
return self._do_request('domains/{}/geo/defaults'.format(domain_name), '', 'get')
def list_geo_records(self, domain_name='', offset=0, limit='', order_by='', order_param='', search_text=''):
params = 'offset={}&limit={}&order_by={}&order_param={}&search_text={}'.format(offset, limit, order_by, order_param, search_text)
return self._do_request('domains/{}/geo'.format(domain_name), params, 'get')
def add_geo_record(self, domain_name='', record_name='', record_type='', geo_region='', record_value=''):
params = 'record_name={}&record_type={}&geo_region={}&record_value={}'.format(record_name, record_type, geo_region, record_value)
return self._do_request('domains/{}/geo'.format(domain_name), params, 'post')
def update_geo_record(self, domain_name='', record_id=None, geo_region='', record_value=''):
args = locals()
params = ''
for k, v in args.items():
if k in ['self', 'domain_name', 'record_id']:
continue
if v != '' and v is not None:
params += '&{}={}'.format(k, v)
return self._do_request('domains/{}/geo/{}'.format(domain_name, record_id), params, 'put')
def failover_records(self, domain_name=''):
return self._do_request('domains/{}/failovers/available'.format(domain_name), '', 'get')
def list_failover_records(self, domain_name='', offset=0, limit='', order_by='', order_param=''):
params = 'offset={}&limit={}&order_by={}&order_param={}'.format(offset, limit, order_by, order_param)
return self._do_request('domains/{}/failovers'.format(domain_name), params, 'get')
def add_failover_record(self, domain_name='', record_id=None, failover_check_type='', failover_check_interval='', failover_return_to_main_value='', failover_additional_port='', failover_record_backup_value=[], failover_use_fws='', failover_additional_response='', failover_additional_request='', failover_notification_email='', failover_notification_sms=''):
backup_value_str = ''
if isinstance(failover_record_backup_value, list) and failover_record_backup_value:
max_val = min(len(failover_record_backup_value) ,3)
for i in range(max_val):
backup_value_str += 'failover_record_backup_value[{}]={}&'.format(i, failover_record_backup_value[i])
params = 'record_id={}&failover_check_type={}&failover_check_interval={}&failover_return_to_main_value={}&failover_additional_port={}&failover_use_fws={}&failover_notification_email={}&failover_notification_sms={}'.format(record_id, failover_check_type, failover_check_interval, failover_return_to_main_value, failover_additional_port, failover_use_fws, failover_notification_email, failover_notification_sms)
if failover_check_type == 'TCP':
params = '{}&failover_additional_respons={}&failover_additional_request={}'.format(params, failover_additional_respons, failover_additional_request)
params = '{}&{}'.format(params, backup_value_str)
return self._do_request('domains/{}/failovers'.format(domain_name), params, 'post')
def update_failover_record(self, domain_name='', record_id=None, failover_check_type='', failover_check_interval='', failover_return_to_main_value='', failover_additional_port='', failover_record_backup_value=[], failover_use_fws='', failover_additional_response='', failover_additional_request='', failover_notification_email='', failover_notification_sms=''):
backup_value_str = ''
if isinstance(failover_record_backup_value, list) and failover_record_backup_value:
max_val = min(len(failover_record_backup_value) ,3)
for i in range(max_val):
backup_value_str += 'failover_record_backup_value[{}]={}&'.format(i, failover_record_backup_value[i])
params = 'failover_check_type={}&failover_check_interval={}&failover_return_to_main_value={}&failover_additional_port={}&failover_use_fws={}&failover_notification_email={}&failover_notification_sms={}'.format(record_id, failover_check_type, failover_check_interval, failover_return_to_main_value, failover_additional_port, failover_use_fws, failover_notification_email, failover_notification_sms)
if failover_check_type == 'TCP':
params = '{}&failover_additional_respons={}&failover_additional_request={}'.format(params, failover_additional_respons, failover_additional_request)
params = '{}&{}'.format(params, backup_value_str)
return self._do_request('domains/{}/failovers/{}'.format(domain_name, record_id), params, 'put')
def delete_failover_record(self, domain_name='', record_id=[]):
if isinstance(record_id, int):
record_id = [record_id]
params = 'record_id={}'.format(','.join(str(x) for x in record_id))
return self._do_request('domains/{}/failovers'.format(domain_name), params, 'delete')
def list_mf_addresses(self, domain_name='', offset=0, limit='', order_by='', order_param=''):
params = 'offset={}&limit={}&order_by={}&order_param={}'.format(offset, limit, order_by, order_param)
return self._do_request('domains/{}/mail_forwarding'.format(domain_name), params, 'get')
def add_mf_address(self, domain_name='', source='', destination=''):
suffix = '@{}'.format(domain_name)
source = source.replace(suffix, '')
params = 'source={}&destination={}'.format(source, destination)
return self._do_request('domains/{}/mail_forwarding'.format(domain_name), params, 'post')
def update_mf_address(self, domain_name='', mf_address_id='', source='', destination=''):
args = locals()
params = ''
for k, v in args.items():
if k in ['self', 'domain_name']:
continue
if v != '':
params += '&{}={}'.format(k, v)
params = params[1:]
suffix = '@{}'.format(domain_name)
params = params.replace(suffix, '')
return self._do_request('domains/{}/mail_forwarding/{}'.format(domain_name, mf_address_id), params, 'put')
def update_mf_address_status(self, domain_name='', mf_address_id=None, status=None):
params = 'status={}'.format(status)
return self._do_request('domains/{}/mail_forwarding/{}/status'.format(domain_name,mf_address_id), params, 'put')
def delete_mf_address(self, domain_name='', mf_address_id=[]):
if isinstance(mf_address_id, int):
mf_address_id = [mf_address_id]
params = 'mf_address_id={}'.format(','.join(str(x) for x in mf_address_id))
return self._do_request('domains/{}/mail_forwarding'.format(domain_name), params, 'delete')
def list_wf_addresses(self, domain_name='', offset=0, limit='', order_by='', order_param=''):
params = 'offset={}&limit={}&order_by={}&order_param={}'.format(offset, limit, order_by, order_param)
return self._do_request('domains/{}/web_forwarding'.format(domain_name), params, 'get')
def add_wf_address(self, domain_name='', https=None, code=None, source='', destination=''):
destination = destination.replace('http://', '')
destination = destination.replace('https://', '')
params = 'https={}&code={}&destination={}'.format(https, code, destination)
if source != '':
suffix = '.{}'.format(domain_name)
source = source.replace(suffix, '')
params = '{}&source={}'.format(params, source)
return self._do_request('domains/{}/web_forwarding'.format(domain_name), params, 'post')
def update_wf_address(self, domain_name='', wf_address_id=None, https=None, code=None, source='', destination=''):
destination = destination.replace('http://', '')
destination = destination.replace('https://', '')
if source != '':
source = source.replace('.{}'.format(domain_name), '')
args = locals()
params = ''
for k, v in args.items():
if k in ['self', 'domain_name', wf_address_id]:
continue
if v != '' and v is not None:
params += '&{}={}'.format(k, v)
params = params[1:]
return self._do_request('domains/{}/web_forwarding/{}'.format(domain_name, wf_address_id), params, 'put')
def update_wf_address_status(self, domain_name='', wf_address_id=None, status=None):
params = 'status={}'.format(status)
return self._do_request('domains/{}/web_forwarding/{}/status'.format(domain_name,wf_address_id), params, 'put')
def delete_wf_address(self, domain_name='', wf_address_id=[]):
if isinstance(wf_address_id, int):
wf_address_id = [wf_address_id]
params = 'wf_address_id={}'.format(','.join(str(x) for x in wf_address_id))
return self._do_request('domains/{}/web_forwarding'.format(domain_name), params, 'delete')
def list_custom_templates(self):
return self._do_request('templates', '', 'get')
def create_custom_template(self, custom_template_name='', custom_template_description=''):
params = 'custom_template_name={}'.format(custom_template_name)
if custom_template_description != '':
params = '{}&custom_template_description={}'.format(params, custom_template_description)
return self._do_request('templates', params, 'post')
def update_custom_template(self, template_id=None, custom_template_name='', custom_template_description=''):
args = locals()
params = ''
for k, v in args.items():
if k in ['self', 'template_id']:
continue
if v != '' and v is not None:
params += '&{}={}'.format(k, v)
params = params[1:]
return self._do_request('templates/{}'.format(template_id), params, 'put')
def delete_custom_template(self, template_id=None):
return self._do_request('templates/{}'.format(template_id), '', 'delete')
def restore_custom_template(self, domain_name='', template_id=None):
params = 'domain_name={}'.format(domain_name)
return self._do_request('templates/{}/restore'.format(template_id), params, 'post')
def list_custom_templates_records(self, template_id=None, domain_name=''):
params = ''
if domain_name != '':
params = 'domain_name={}'.format(domain_name)
return self._do_request('templates/{}/records'.format(template_id), params, 'get')
def add_custom_template_record(self, template_id=None, record_type='', record_ttl=600, record_name='', record_value=''):
record_type = record_type.upper()
if not record_name.endswith('.{{domain_name}}'):
record_name = '{}.{{domain_name}}'.format(record_name)
if record_type == 'TXT':
record_value = '"{}"'.format(record_value)
params = 'record_type={}&record_ttl={}&record_name={}&record_value={}'.format(record_type, record_ttl, record_name, record_value)
return self._do_request('templates/{}/records'.format(template_id), params, 'post')
def update_custom_template_record(self, template_id=None, record_id=None, record_type='', record_ttl='', record_name='', record_value=''):
record_type = record_type.upper()
if not record_name.endswith('.{{domain_name}}'):
record_name = '{}.{{domain_name}}'.format(record_name)
if record_type == 'TXT':
record_value = '"{}"'.format(record_value)
args = locals()
params = ''
for k, v in args.items():
if k in ['self', 'template_id', 'record_id']:
continue
if v != '' and v is not None:
params += '&{}={}'.format(k, v)
return self._do_request('templates/{}/records/{}'.format(template_id, record_id), params, 'put')
def delete_custom_template_record(self, template_id='', record_id=None):
    """Delete one or more records from a custom template.

    Args:
        template_id: Identifier of the template.
        record_id: A single record id (int) or an iterable of record ids.

    Returns:
        The response of a DELETE to ``templates/{template_id}/records``.
    """
    # BUGFIX: the original used a mutable default argument (record_id=[]),
    # which is shared across calls; use None as the sentinel instead.
    if record_id is None:
        record_id = []
    elif isinstance(record_id, int):
        record_id = [record_id]
    params = 'record_id={}'.format(','.join(str(rid) for rid in record_id))
    # BUGFIX: every sibling method targets the plural 'templates/...' path;
    # the original's singular 'template/...' here looks like a typo —
    # NOTE(review): confirm against the API's endpoint documentation.
    return self._do_request('templates/{}/records'.format(template_id), params, 'delete')
| 52.236686
| 417
| 0.654055
| 2,145
| 17,656
| 5.045688
| 0.068998
| 0.080384
| 0.047676
| 0.075487
| 0.837014
| 0.802827
| 0.745819
| 0.709046
| 0.652684
| 0.600758
| 0
| 0.002225
| 0.185433
| 17,656
| 337
| 418
| 52.391691
| 0.750313
| 0
| 0
| 0.417323
| 0
| 0
| 0.163231
| 0.099513
| 0
| 0
| 0
| 0
| 0
| 1
| 0.185039
| false
| 0
| 0.007874
| 0.035433
| 0.393701
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
adf94fd4e6f14f8191f7ba4c05d230bffa21cea1
| 358
|
py
|
Python
|
tests/types/checkers_test.py
|
johnnv1/CCAgT_dataset_utils
|
362ac5f664d73c0cfa1b1a21c62f03318dcc4c32
|
[
"Apache-2.0"
] | 1
|
2022-02-23T20:29:16.000Z
|
2022-02-23T20:29:16.000Z
|
tests/types/checkers_test.py
|
johnnv1/CCAgT_dataset_utils
|
362ac5f664d73c0cfa1b1a21c62f03318dcc4c32
|
[
"Apache-2.0"
] | 32
|
2022-02-18T23:38:00.000Z
|
2022-03-31T22:42:00.000Z
|
tests/types/checkers_test.py
|
johnnv1/CCAgT-utils
|
2a12fcb2cd3a770aa81a9e75ed6ad68077b72bfb
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import annotations
from CCAgT_utils.types import checkers
def test_is_2d():
    """A 2-component shape is 2-D; a 3-component shape is not."""
    flat_shape = (100, 200)
    volume_shape = (100, 200, 300)
    assert checkers.is_2d(flat_shape)
    assert not checkers.is_2d(volume_shape)
def test_is_rgb_shape():
    """Only a (h, w, 3) shape counts as RGB; other channel counts and 2-D shapes do not."""
    rgb = (100, 200, 3)
    too_many_channels = (100, 200, 300)
    no_channels = (100, 200)
    assert checkers.is_rgb_shape(rgb)
    assert not checkers.is_rgb_shape(too_many_channels)
    assert not checkers.is_rgb_shape(no_channels)
| 23.866667
| 53
| 0.726257
| 58
| 358
| 4.172414
| 0.344828
| 0.206612
| 0.165289
| 0.235537
| 0.520661
| 0.371901
| 0.272727
| 0.272727
| 0
| 0
| 0
| 0.133333
| 0.162011
| 358
| 14
| 54
| 25.571429
| 0.673333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.555556
| 1
| 0.222222
| true
| 0
| 0.222222
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.