hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c4e2e10f49a13463b769ca74126afd1bddc503bc | 25 | py | Python | my_classes/.history/ModulesPackages_PackageNamespaces/modules_1_20210725183904.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/.history/ModulesPackages_PackageNamespaces/modules_1_20210725183904.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | my_classes/.history/ModulesPackages_PackageNamespaces/modules_1_20210725183904.py | minefarmer/deep-Dive-1 | b0675b853180c5b5781888266ea63a3793b8d855 | [
"Unlicense"
] | null | null | null | print('------- Running ') | 25 | 25 | 0.48 | 2 | 25 | 6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.08 | 25 | 1 | 25 | 25 | 0.521739 | 0 | 0 | 0 | 0 | 0 | 0.615385 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 6 |
f2145a805a84e871619813e991fee507efcf532d | 64 | py | Python | primefactors/test_prime_factor.py | gartee-john-PFG/WorkshopAdvancedTDDPython | 72ae1ab8dfdd6d0c5c74e87edd3c4a79f121c57c | [
"MIT"
] | null | null | null | primefactors/test_prime_factor.py | gartee-john-PFG/WorkshopAdvancedTDDPython | 72ae1ab8dfdd6d0c5c74e87edd3c4a79f121c57c | [
"MIT"
] | null | null | null | primefactors/test_prime_factor.py | gartee-john-PFG/WorkshopAdvancedTDDPython | 72ae1ab8dfdd6d0c5c74e87edd3c4a79f121c57c | [
"MIT"
] | null | null | null | # from prime_factor import *
def test_prime_factor():
pass
| 12.8 | 28 | 0.71875 | 9 | 64 | 4.777778 | 0.777778 | 0.511628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.203125 | 64 | 4 | 29 | 16 | 0.843137 | 0.40625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0.5 | 0 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
f21ab7bd417b312bb71bbd988ca2f7088fa8afc1 | 32 | py | Python | main/sqla/__init__.py | gwynethbradbury/itdb | 0664100b00ed8cf7d4565a0b2b90e089ad528733 | [
"BSD-3-Clause"
] | null | null | null | main/sqla/__init__.py | gwynethbradbury/itdb | 0664100b00ed8cf7d4565a0b2b90e089ad528733 | [
"BSD-3-Clause"
] | null | null | null | main/sqla/__init__.py | gwynethbradbury/itdb | 0664100b00ed8cf7d4565a0b2b90e089ad528733 | [
"BSD-3-Clause"
] | null | null | null |
import app
#app.start_app()
| 4.571429 | 16 | 0.65625 | 5 | 32 | 4 | 0.6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.21875 | 32 | 6 | 17 | 5.333333 | 0.8 | 0.46875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
48148e5ff9d5140098b424ba916ced82bb65141c | 10,955 | py | Python | aries_cloudagent/protocols/actionmenu/v1_0/tests/test_routes.py | kuraakhilesh8230/aries-cloudagent-python | ee384d1330f6a50ff45a507392ce54f92900f23a | [
"Apache-2.0"
] | 247 | 2019-07-02T21:10:21.000Z | 2022-03-30T13:55:33.000Z | aries_cloudagent/protocols/actionmenu/v1_0/tests/test_routes.py | kuraakhilesh8230/aries-cloudagent-python | ee384d1330f6a50ff45a507392ce54f92900f23a | [
"Apache-2.0"
] | 1,462 | 2019-07-02T20:57:30.000Z | 2022-03-31T23:13:35.000Z | aries_cloudagent/protocols/actionmenu/v1_0/tests/test_routes.py | kuraakhilesh8230/aries-cloudagent-python | ee384d1330f6a50ff45a507392ce54f92900f23a | [
"Apache-2.0"
] | 377 | 2019-06-20T21:01:31.000Z | 2022-03-30T08:27:53.000Z | from asynctest import TestCase as AsyncTestCase
from asynctest import mock as async_mock
from .....admin.request_context import AdminRequestContext
from .....storage.error import StorageNotFoundError
from .. import routes as test_module
class TestActionMenuRoutes(AsyncTestCase):
def setUp(self):
self.session_inject = {}
self.context = AdminRequestContext.test_context(self.session_inject)
self.request_dict = {
"context": self.context,
"outbound_message_router": async_mock.CoroutineMock(),
}
self.request = async_mock.MagicMock(
app={},
match_info={},
query={},
__getitem__=lambda _, k: self.request_dict[k],
)
async def test_actionmenu_close(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"conn_id": "dummy"}
test_module.retrieve_connection_menu = async_mock.CoroutineMock()
test_module.save_connection_menu = async_mock.CoroutineMock()
with async_mock.patch.object(test_module.web, "json_response") as mock_response:
res = await test_module.actionmenu_close(self.request)
mock_response.assert_called_once_with({})
async def test_actionmenu_close_x(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"conn_id": "dummy"}
test_module.retrieve_connection_menu = async_mock.CoroutineMock()
test_module.save_connection_menu = async_mock.CoroutineMock(
side_effect=test_module.StorageError()
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.actionmenu_close(self.request)
async def test_actionmenu_close_not_found(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"conn_id": "dummy"}
test_module.retrieve_connection_menu = async_mock.CoroutineMock(
return_value=None
)
with self.assertRaises(test_module.web.HTTPNotFound):
await test_module.actionmenu_close(self.request)
async def test_actionmenu_fetch(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"conn_id": "dummy"}
test_module.retrieve_connection_menu = async_mock.CoroutineMock(
return_value=None
)
with async_mock.patch.object(test_module.web, "json_response") as mock_response:
res = await test_module.actionmenu_fetch(self.request)
mock_response.assert_called_once_with({"result": None})
async def test_actionmenu_perform(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"conn_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_record, async_mock.patch.object(
test_module, "Perform", autospec=True
) as mock_perform, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_conn_record.retrieve_by_id = async_mock.CoroutineMock()
res = await test_module.actionmenu_perform(self.request)
mock_response.assert_called_once_with({})
self.request["outbound_message_router"].assert_called_once_with(
mock_perform.return_value,
connection_id=self.request.match_info["conn_id"],
)
async def test_actionmenu_perform_no_conn_record(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"conn_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_record, async_mock.patch.object(
test_module, "Perform", autospec=True
) as mock_perform:
# Emulate storage not found (bad connection id)
mock_conn_record.retrieve_by_id = async_mock.CoroutineMock(
side_effect=StorageNotFoundError
)
with self.assertRaises(test_module.web.HTTPNotFound):
await test_module.actionmenu_perform(self.request)
async def test_actionmenu_perform_conn_not_ready(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"conn_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_record, async_mock.patch.object(
test_module, "Perform", autospec=True
) as mock_perform:
# Emulate connection not ready
mock_conn_record.retrieve_by_id = async_mock.CoroutineMock()
mock_conn_record.retrieve_by_id.return_value.is_ready = False
with self.assertRaises(test_module.web.HTTPForbidden):
await test_module.actionmenu_perform(self.request)
async def test_actionmenu_request(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"conn_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_record, async_mock.patch.object(
test_module, "MenuRequest", autospec=True
) as menu_request, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_conn_record.retrieve_by_id = async_mock.CoroutineMock()
res = await test_module.actionmenu_request(self.request)
mock_response.assert_called_once_with({})
self.request["outbound_message_router"].assert_called_once_with(
menu_request.return_value,
connection_id=self.request.match_info["conn_id"],
)
async def test_actionmenu_request_no_conn_record(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"conn_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_record, async_mock.patch.object(
test_module, "Perform", autospec=True
) as mock_perform:
# Emulate storage not found (bad connection id)
mock_conn_record.retrieve_by_id = async_mock.CoroutineMock(
side_effect=StorageNotFoundError
)
with self.assertRaises(test_module.web.HTTPNotFound):
await test_module.actionmenu_request(self.request)
async def test_actionmenu_request_conn_not_ready(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"conn_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_record, async_mock.patch.object(
test_module, "Perform", autospec=True
) as mock_perform:
# Emulate connection not ready
mock_conn_record.retrieve_by_id = async_mock.CoroutineMock()
mock_conn_record.retrieve_by_id.return_value.is_ready = False
with self.assertRaises(test_module.web.HTTPForbidden):
await test_module.actionmenu_request(self.request)
async def test_actionmenu_send(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"conn_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_record, async_mock.patch.object(
test_module, "Menu", autospec=True
) as mock_menu, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_conn_record.retrieve_by_id = async_mock.CoroutineMock()
mock_menu.deserialize = async_mock.MagicMock()
res = await test_module.actionmenu_send(self.request)
mock_response.assert_called_once_with({})
self.request["outbound_message_router"].assert_called_once_with(
mock_menu.deserialize.return_value,
connection_id=self.request.match_info["conn_id"],
)
async def test_actionmenu_send_deserialize_x(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"conn_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_record, async_mock.patch.object(
test_module, "Menu", autospec=True
) as mock_menu:
mock_conn_record.retrieve_by_id = async_mock.CoroutineMock()
mock_menu.deserialize = async_mock.MagicMock(
side_effect=test_module.BaseModelError("cannot deserialize")
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.actionmenu_send(self.request)
async def test_actionmenu_send_no_conn_record(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"conn_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_record, async_mock.patch.object(
test_module, "Menu", autospec=True
) as mock_menu:
mock_menu.deserialize = async_mock.MagicMock()
# Emulate storage not found (bad connection id)
mock_conn_record.retrieve_by_id = async_mock.CoroutineMock(
side_effect=StorageNotFoundError
)
with self.assertRaises(test_module.web.HTTPNotFound):
await test_module.actionmenu_send(self.request)
async def test_actionmenu_send_conn_not_ready(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"conn_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_record, async_mock.patch.object(
test_module, "Menu", autospec=True
) as mock_menu:
mock_menu.deserialize = async_mock.MagicMock()
# Emulate connection not ready
mock_conn_record.retrieve_by_id = async_mock.CoroutineMock()
mock_conn_record.retrieve_by_id.return_value.is_ready = False
with self.assertRaises(test_module.web.HTTPForbidden):
await test_module.actionmenu_send(self.request)
async def test_register(self):
mock_app = async_mock.MagicMock()
mock_app.add_routes = async_mock.MagicMock()
await test_module.register(mock_app)
mock_app.add_routes.assert_called_once()
async def test_post_process_routes(self):
mock_app = async_mock.MagicMock(_state={"swagger_dict": {}})
test_module.post_process_routes(mock_app)
assert "tags" in mock_app._state["swagger_dict"]
| 40.275735 | 88 | 0.665906 | 1,263 | 10,955 | 5.441013 | 0.077593 | 0.085128 | 0.099243 | 0.072759 | 0.878201 | 0.855064 | 0.846624 | 0.83702 | 0.824505 | 0.824505 | 0 | 0 | 0.24628 | 10,955 | 271 | 89 | 40.424354 | 0.832264 | 0.020447 | 0 | 0.658537 | 0 | 0 | 0.052872 | 0.008579 | 0 | 0 | 0 | 0 | 0.092683 | 1 | 0.004878 | false | 0 | 0.02439 | 0 | 0.034146 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
481829789616b20be1176df25ea8f38efb0eeed6 | 29 | py | Python | deadset/__init__.py | buanzo/deadset | e9ee9017da8a45f20371bc33d393757509a32815 | [
"Apache-2.0"
] | null | null | null | deadset/__init__.py | buanzo/deadset | e9ee9017da8a45f20371bc33d393757509a32815 | [
"Apache-2.0"
] | null | null | null | deadset/__init__.py | buanzo/deadset | e9ee9017da8a45f20371bc33d393757509a32815 | [
"Apache-2.0"
] | null | null | null | from .deadset import DeadSet
| 14.5 | 28 | 0.827586 | 4 | 29 | 6 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.137931 | 29 | 1 | 29 | 29 | 0.96 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
6fdce1ec0d53d46241f02b67e214a77ec6d89e22 | 373 | py | Python | Lessons/L7.py | sometheasiekswx/SopheanyPythonLessons | b2e9faf0d7634e56cd7b71148f1821915a0c6157 | [
"MIT"
] | null | null | null | Lessons/L7.py | sometheasiekswx/SopheanyPythonLessons | b2e9faf0d7634e56cd7b71148f1821915a0c6157 | [
"MIT"
] | null | null | null | Lessons/L7.py | sometheasiekswx/SopheanyPythonLessons | b2e9faf0d7634e56cd7b71148f1821915a0c6157 | [
"MIT"
] | null | null | null | def remove_all_occurences(list, remove_value):
return None
def is_leap(list, remove_value):
return None
def add(a, b):
return None
def g(list, remove_value):
return None
def t(list, remove_value):
return None
print(2 in [1,2])
def if_funtion():
if 2 in [1,2]:
return True
print(if_funtion())
if 2 in [1,2]:
print(True)
| 12.032258 | 46 | 0.630027 | 62 | 373 | 3.645161 | 0.354839 | 0.221239 | 0.265487 | 0.371681 | 0.623894 | 0.513274 | 0.141593 | 0 | 0 | 0 | 0 | 0.032491 | 0.257373 | 373 | 30 | 47 | 12.433333 | 0.783394 | 0 | 0 | 0.411765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.352941 | false | 0 | 0 | 0.294118 | 0.705882 | 0.176471 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
6fe3e6e1c1d81894b3e7824fde948b7478361134 | 205 | py | Python | platforms/winpack_dldt/2020.4/patch.config.py | lefatoum2/opencv | f7cab121fe2954c67b343b3b7805e1c092812093 | [
"Apache-2.0"
] | 56,632 | 2016-07-04T16:36:08.000Z | 2022-03-31T18:38:14.000Z | platforms/winpack_dldt/2020.4/patch.config.py | yusufm423/opencv | 6a2077cbd8a8a0d8cbd3e0e8c3ca239f17e6c067 | [
"Apache-2.0"
] | 13,593 | 2016-07-04T13:59:03.000Z | 2022-03-31T21:04:51.000Z | platforms/winpack_dldt/2020.4/patch.config.py | yusufm423/opencv | 6a2077cbd8a8a0d8cbd3e0e8c3ca239f17e6c067 | [
"Apache-2.0"
] | 54,986 | 2016-07-04T14:24:38.000Z | 2022-03-31T22:51:18.000Z | applyPatch('20200701-dldt-disable-unused-targets.patch')
applyPatch('20200413-dldt-pdb.patch')
applyPatch('20200604-dldt-disable-multidevice.patch')
applyPatch('20201005-dldt-fix-cldnn-compilation.patch')
| 41 | 56 | 0.819512 | 25 | 205 | 6.72 | 0.6 | 0.267857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.159204 | 0.019512 | 205 | 4 | 57 | 51.25 | 0.676617 | 0 | 0 | 0 | 0 | 0 | 0.707317 | 0.707317 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
82f546605542a0a5afb059501f4a21e3d5deb8ae | 32 | py | Python | examples/math.sqrt/ex1.py | mcorne/python-by-example | 15339c0909c84b51075587a6a66391100971c033 | [
"MIT"
] | null | null | null | examples/math.sqrt/ex1.py | mcorne/python-by-example | 15339c0909c84b51075587a6a66391100971c033 | [
"MIT"
] | null | null | null | examples/math.sqrt/ex1.py | mcorne/python-by-example | 15339c0909c84b51075587a6a66391100971c033 | [
"MIT"
] | null | null | null | import math
print(math.sqrt(2))
| 10.666667 | 19 | 0.75 | 6 | 32 | 4 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 0.09375 | 32 | 2 | 20 | 16 | 0.793103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
d20adf7da1521f92bd85bf9876efd6ac7cd2aaf1 | 442 | py | Python | river/tree/_attribute_test/attribute_split_suggestion.py | Styren/river | 128a5ffe9f80df85e23d9ae871e02bea6dc9c100 | [
"BSD-3-Clause"
] | 1 | 2020-12-04T18:56:19.000Z | 2020-12-04T18:56:19.000Z | river/tree/_attribute_test/attribute_split_suggestion.py | Styren/river | 128a5ffe9f80df85e23d9ae871e02bea6dc9c100 | [
"BSD-3-Clause"
] | null | null | null | river/tree/_attribute_test/attribute_split_suggestion.py | Styren/river | 128a5ffe9f80df85e23d9ae871e02bea6dc9c100 | [
"BSD-3-Clause"
] | 1 | 2021-01-22T15:18:39.000Z | 2021-01-22T15:18:39.000Z | class AttributeSplitSuggestion:
def __init__(self, split_test, resulting_class_distributions, merit):
self.split_test = split_test
self.resulting_class_distributions = resulting_class_distributions
self.merit = merit
def num_splits(self):
return len(self.resulting_class_distributions)
def resulting_stats_from_split(self, split_idx):
return self.resulting_class_distributions[split_idx]
| 36.833333 | 74 | 0.762443 | 51 | 442 | 6.156863 | 0.333333 | 0.22293 | 0.429936 | 0.296178 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.178733 | 442 | 11 | 75 | 40.181818 | 0.865014 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0 | 0.222222 | 0.666667 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
d22c148027b7667323c6f4986984e395a5fd3ee7 | 189 | py | Python | wakeful/__init__.py | robOcity/wakeful | e3a50649e6208da28feea2fe402f119b0223293d | [
"MIT"
] | null | null | null | wakeful/__init__.py | robOcity/wakeful | e3a50649e6208da28feea2fe402f119b0223293d | [
"MIT"
] | null | null | null | wakeful/__init__.py | robOcity/wakeful | e3a50649e6208da28feea2fe402f119b0223293d | [
"MIT"
] | null | null | null | from . import api_registration
from . import metrics
from . import virus_total
from . import log_munger
from . import ip_address_regex
from . import pipelining
from . import subsample_pair
| 23.625 | 30 | 0.814815 | 27 | 189 | 5.481481 | 0.555556 | 0.472973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148148 | 189 | 7 | 31 | 27 | 0.919255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
d2471bdcd5d3e09a7b753871cb48c6c118576ee1 | 64 | py | Python | TelegramBot8/Model/__init__.py | AppDevIn/Telegram-Bot8 | 6bed3332154909667e7f2b8f958c9d6b9b01b54c | [
"MIT"
] | null | null | null | TelegramBot8/Model/__init__.py | AppDevIn/Telegram-Bot8 | 6bed3332154909667e7f2b8f958c9d6b9b01b54c | [
"MIT"
] | 14 | 2022-02-06T08:28:52.000Z | 2022-02-25T11:51:24.000Z | TelegramBot8/Model/__init__.py | AppDevIn/TelegramBot | 6bed3332154909667e7f2b8f958c9d6b9b01b54c | [
"MIT"
] | null | null | null | from .Dto import *
from .Response import *
from .Reqest import * | 21.333333 | 23 | 0.734375 | 9 | 64 | 5.222222 | 0.555556 | 0.425532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.171875 | 64 | 3 | 24 | 21.333333 | 0.886792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
d2779e9dca7dafe94a2ee125055f330f3c50cb69 | 14,871 | py | Python | api/users/tests/tests_exporter_roles_and_permissions.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | 3 | 2019-05-15T09:30:39.000Z | 2020-04-22T16:14:23.000Z | api/users/tests/tests_exporter_roles_and_permissions.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | 85 | 2019-04-24T10:39:35.000Z | 2022-03-21T14:52:12.000Z | api/users/tests/tests_exporter_roles_and_permissions.py | django-doctor/lite-api | 1ba278ba22ebcbb977dd7c31dd3701151cd036bf | [
"MIT"
] | 1 | 2021-01-17T11:12:19.000Z | 2021-01-17T11:12:19.000Z | from django.urls import reverse
from parameterized import parameterized
from rest_framework import status
from api.core import constants
from api.core.constants import ExporterPermissions
from test_helpers.clients import DataTestClient
from api.users.enums import UserType
from api.users.models import Role, Permission, ExporterUser
class RolesAndPermissionsTests(DataTestClient):
def test_create_new_role_with_no_permissions(self):
self.exporter_user.set_role(self.organisation, self.exporter_super_user_role)
data = {"name": "some role", "permissions": []}
url = reverse("organisations:roles_views", kwargs={"org_pk": self.organisation.id})
response = self.client.post(url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Role.objects.get(name="some role").name, "some role")
def test_get_list_of_all_roles_as_exporter_super_user(self):
self.exporter_user.set_role(self.organisation, self.exporter_super_user_role)
initial_roles_count = Role.objects.filter(type=UserType.EXPORTER).count()
url = reverse("organisations:roles_views", kwargs={"org_pk": self.organisation.id})
role = Role(name="some", organisation=self.organisation, type=UserType.EXPORTER)
role.save()
response = self.client.get(url, **self.exporter_headers)
response_data = response.json()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response_data["results"]), initial_roles_count + 1)
def test_edit_a_role(self):
self.exporter_user.set_role(self.organisation, self.exporter_super_user_role)
role = Role(name="some", organisation=self.organisation, type=UserType.EXPORTER)
role.save()
url = reverse("organisations:role", kwargs={"org_pk": self.organisation.id, "pk": role.id})
data = {"permissions": [ExporterPermissions.ADMINISTER_USERS.name]}
response = self.client.put(url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(
ExporterPermissions.ADMINISTER_USERS.name
in Role.objects.get(id=role.id).permissions.values_list("id", flat=True)
)
def test_cannot_create_role_without_permission(self):
url = reverse("organisations:roles_views", kwargs={"org_pk": self.organisation.id})
data = {"name": "some role", "permissions": []}
initial_roles_count = Role.objects.count()
response = self.client.post(url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(Role.objects.all().count(), initial_roles_count)
def test_cannot_edit_role_without_permission(self):
role = Role(name="some", organisation=self.organisation, type=UserType.EXPORTER)
role.save()
url = reverse("organisations:role", kwargs={"org_pk": self.organisation.id, "pk": role.id})
data = {"permissions": [ExporterPermissions.ADMINISTER_USERS.name]}
response = self.client.put(url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(Role.objects.get(id=role.id).permissions.values().count(), 0)
@parameterized.expand(
[
[{"name": "this is a name", "permissions": []}],
[{"name": "ThIs iS A NaMe", "permissions": []}],
[{"name": " this is a name ", "permissions": []}],
]
)
def test_role_name_must_be_unique(self, data):
self.exporter_user.set_role(self.organisation, self.exporter_super_user_role)
Role(name="this is a name", organisation=self.organisation).save()
initial_roles_count = Role.objects.count()
url = reverse("organisations:roles_views", kwargs={"org_pk": self.organisation.id})
response = self.client.post(url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(Role.objects.all().count(), initial_roles_count)
def test_role_name_not_have_to_be_unique_different_organisations(self):
self.exporter_user.set_role(self.organisation, self.exporter_super_user_role)
org, _ = self.create_organisation_with_exporter_user()
role_name = "duplicate name"
Role(name=role_name, organisation=org, type=UserType.EXPORTER).save()
data = {"name": role_name, "permissions": []}
url = reverse("organisations:roles_views", kwargs={"org_pk": self.organisation.id})
response = self.client.post(url, data, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Role.objects.filter(name=role_name).count(), 2)
def test_only_see_roles_user_has_all_permissions_for_3_permissions(self):
permissions = [
constants.ExporterPermissions.ADMINISTER_USERS.name,
constants.ExporterPermissions.ADMINISTER_SITES.name,
constants.ExporterPermissions.EXPORTER_ADMINISTER_ROLES.name,
]
user_role = Role(name="new role", organisation=self.organisation)
user_role.permissions.set(permissions)
user_role.save()
self.exporter_user.set_role(self.organisation, user_role)
url = reverse("organisations:roles_views", kwargs={"org_pk": self.organisation.id})
# Create a new role, each with a singular different permission
for permission in Permission.exporter.all():
role = Role(name=str(permission.id), organisation=self.organisation)
role.permissions.set([permission.id])
role.save()
second_role = Role(name="multi permission role", organisation=self.organisation)
second_role.permissions.set(
[
constants.ExporterPermissions.ADMINISTER_USERS.name,
constants.ExporterPermissions.ADMINISTER_SITES.name,
constants.ExporterPermissions.EXPORTER_ADMINISTER_ROLES.name,
]
)
second_role.save()
response = self.client.get(url, **self.exporter_headers)
response_data = response.json()["results"]
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response_data), 6)
self.assertIn(str(Role.objects.get(name="multi permission role").id), str(response_data))
self.assertIn(
str(Role.objects.get(name=constants.ExporterPermissions.ADMINISTER_USERS.name).id), str(response_data)
)
self.assertIn(
str(Role.objects.get(name=constants.ExporterPermissions.ADMINISTER_SITES.name).id), str(response_data)
)
self.assertIn(
str(Role.objects.get(name=constants.ExporterPermissions.EXPORTER_ADMINISTER_ROLES.name).id),
str(response_data),
)
def test_only_see_roles_user_has_all_permissions_for_2_permissions(self):
permissions = [
constants.ExporterPermissions.ADMINISTER_USERS.name,
constants.ExporterPermissions.ADMINISTER_SITES.name,
]
user_role = Role(name="new role", organisation=self.organisation)
user_role.permissions.set(permissions)
user_role.save()
self.exporter_user.set_role(self.organisation, user_role)
url = reverse("organisations:roles_views", kwargs={"org_pk": self.organisation.id})
# Create a new role, each with a singular different permission
for permission in Permission.exporter.all():
role = Role(name=str(permission.id), organisation=self.organisation)
role.permissions.set([permission.id])
role.save()
second_role = Role(name="multi permission role", organisation=self.organisation)
second_role.permissions.set(
[
constants.ExporterPermissions.ADMINISTER_USERS.name,
constants.ExporterPermissions.ADMINISTER_SITES.name,
constants.ExporterPermissions.EXPORTER_ADMINISTER_ROLES.name,
]
)
second_role.save()
response = self.client.get(url, **self.exporter_headers)
response_data = response.json()["results"]
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response_data), 4)
self.assertIn(
str(Role.objects.get(name=constants.ExporterPermissions.ADMINISTER_USERS.name).id), str(response_data)
)
self.assertIn(
str(Role.objects.get(name=constants.ExporterPermissions.ADMINISTER_SITES.name).id), str(response_data)
)
def test_only_see_roles_user_has_all_permissions_for_1_permission(self):
permissions = [constants.ExporterPermissions.ADMINISTER_USERS.name]
user_role = Role(name="new role", organisation=self.organisation)
user_role.permissions.set(permissions)
user_role.save()
self.exporter_user.set_role(self.organisation, user_role)
url = reverse("organisations:roles_views", kwargs={"org_pk": self.organisation.id})
# Create a new role, each with a singular different permission
for permission in Permission.exporter.all():
role = Role(name=str(permission.id), organisation=self.organisation)
role.permissions.set([permission.id])
role.save()
second_role = Role(name="multi permission role", organisation=self.organisation)
second_role.permissions.set(
[
constants.ExporterPermissions.ADMINISTER_USERS.name,
constants.ExporterPermissions.ADMINISTER_SITES.name,
constants.ExporterPermissions.EXPORTER_ADMINISTER_ROLES.name,
]
)
second_role.save()
response = self.client.get(url, **self.exporter_headers)
response_data = response.json()["results"]
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response_data), 3)
self.assertIn(
str(Role.objects.get(name=constants.ExporterPermissions.ADMINISTER_USERS.name).id), str(response_data)
)
@parameterized.expand(
[
[
[
constants.ExporterPermissions.ADMINISTER_USERS.name,
constants.ExporterPermissions.ADMINISTER_SITES.name,
constants.ExporterPermissions.EXPORTER_ADMINISTER_ROLES.name,
]
],
[
[
constants.ExporterPermissions.ADMINISTER_USERS.name,
constants.ExporterPermissions.ADMINISTER_SITES.name,
]
],
[[constants.ExporterPermissions.ADMINISTER_USERS.name]],
]
)
def test_only_see_permissions_user_already_has(self, permissions):
user_role = Role(name="new role", organisation=self.organisation)
user_role.permissions.set(permissions)
user_role.save()
self.exporter_user.set_role(self.organisation, user_role)
url = reverse("organisations:permissions")
response = self.client.get(url, **self.exporter_headers)
response_data = response.json()["permissions"]
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response_data), len(permissions))
for permission in permissions:
self.assertIn(permission, [p["id"] for p in response_data])
def test_cannot_change_own_role(self):
user_role = Role(name="new role", organisation=self.organisation)
user_role.permissions.set([constants.ExporterPermissions.ADMINISTER_USERS.name])
user_role.save()
self.exporter_user.set_role(self.organisation, user_role)
response = self.client.put(
reverse("organisations:user", kwargs={"org_pk": self.organisation.id, "user_pk": self.exporter_user.pk},),
data={"role": str(constants.Roles.EXPORTER_DEFAULT_ROLE_ID)},
**self.exporter_headers,
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertNotEqual(
self.exporter_user.get_role(self.organisation),
Role.objects.get(id=constants.Roles.EXPORTER_DEFAULT_ROLE_ID),
)
def test_cannot_change_another_users_role_to_one_the_request_user_does_not_have_access_to(self):
user_role = Role(name="new role", organisation=self.organisation)
user_role.permissions.set([constants.ExporterPermissions.ADMINISTER_USERS.name])
user_role.save()
second_user_role = Role(name="new role", organisation=self.organisation)
second_user_role.permissions.set(
[constants.ExporterPermissions.ADMINISTER_USERS.name, constants.ExporterPermissions.ADMINISTER_SITES.name]
)
second_user_role.save()
self.exporter_user.set_role(self.organisation, user_role)
second_user = self.create_exporter_user(self.organisation)
response = self.client.put(
reverse("organisations:user", kwargs={"org_pk": self.organisation.id, "user_pk": second_user.pk},),
data={"role": str(second_user_role.id)},
**self.exporter_headers,
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertNotEqual(second_user.get_role(self.organisation), second_user_role)
def test_can_change_another_users_role_to_newly_created_role(self):
user_role = Role(name="new role one", organisation=self.organisation, type=UserType.EXPORTER)
user_role.permissions.set([constants.ExporterPermissions.ADMINISTER_USERS.name])
user_role.save()
second_user_role = Role(name="new role two", organisation=self.organisation, type=UserType.EXPORTER)
second_user_role.save()
self.exporter_user.set_role(self.organisation, user_role)
second_user = self.create_exporter_user(self.organisation)
response = self.client.put(
reverse("organisations:user", kwargs={"org_pk": self.organisation.id, "user_pk": second_user.pk},),
data={"role": second_user_role.id},
**self.exporter_headers,
)
response_body = response.json()
second_user.refresh_from_db()
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNotEqual(second_user.get_role(self.organisation), user_role)
self.assertEqual(response_body["user_relationship"]["role"], str(second_user_role.id))
self.assertEqual(response_body["user_relationship"]["status"]["key"], "Active")
self.assertEqual(response_body["user_relationship"]["status"]["value"], "Active")
| 45.898148 | 118 | 0.684957 | 1,676 | 14,871 | 5.840692 | 0.082936 | 0.08009 | 0.10093 | 0.073756 | 0.859843 | 0.838492 | 0.809582 | 0.780672 | 0.76065 | 0.757381 | 0 | 0.004318 | 0.205837 | 14,871 | 323 | 119 | 46.040248 | 0.824555 | 0.012239 | 0 | 0.596154 | 0 | 0 | 0.065305 | 0.015322 | 0 | 0 | 0 | 0 | 0.15 | 1 | 0.053846 | false | 0 | 0.030769 | 0 | 0.088462 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
963df734fe62085e433674522830ce731e3150a9 | 72 | py | Python | Hero2Vector/utils/__init__.py | diorw/dota_analyze_and_prediction | 3f5a6f21ba74fe065bbb5cc2fa8f512986023249 | [
"MIT"
] | null | null | null | Hero2Vector/utils/__init__.py | diorw/dota_analyze_and_prediction | 3f5a6f21ba74fe065bbb5cc2fa8f512986023249 | [
"MIT"
] | null | null | null | Hero2Vector/utils/__init__.py | diorw/dota_analyze_and_prediction | 3f5a6f21ba74fe065bbb5cc2fa8f512986023249 | [
"MIT"
] | null | null | null | from . import dataset
from . import evaluation
from . import prediction
| 18 | 24 | 0.791667 | 9 | 72 | 6.333333 | 0.555556 | 0.526316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 72 | 3 | 25 | 24 | 0.95 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
966b2c217d16cf2b010512b58092a98162fe8b22 | 335 | py | Python | kaori/plugins/gacha/engine/test_lib.py | austinpray/kaori | b21c4146b9d0d27b87015cff0768138568a12e9c | [
"MIT"
] | 3 | 2020-05-04T03:43:20.000Z | 2020-12-03T22:34:47.000Z | kaori/plugins/gacha/engine/test_lib.py | austinpray/kaori | b21c4146b9d0d27b87015cff0768138568a12e9c | [
"MIT"
] | 287 | 2020-04-21T02:39:47.000Z | 2022-03-28T13:11:59.000Z | kaori/plugins/gacha/engine/test_lib.py | austinpray/kaori | b21c4146b9d0d27b87015cff0768138568a12e9c | [
"MIT"
] | 1 | 2020-10-22T00:20:43.000Z | 2020-10-22T00:20:43.000Z | from .core import *
def test_humanize():
assert humanize_nature(baby, clown) == 'baby clown'
assert humanize_nature(clown, baby) == 'clown baby'
assert humanize_nature(baby, cursed) == 'cursed baby'
assert humanize_nature(cursed, baby) == 'cursed baby'
assert humanize_nature(feral, cursed) == 'feral and cursed'
| 33.5 | 63 | 0.704478 | 42 | 335 | 5.47619 | 0.309524 | 0.304348 | 0.434783 | 0.313043 | 0.26087 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.176119 | 335 | 9 | 64 | 37.222222 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0.173134 | 0 | 0 | 0 | 0 | 0 | 0.714286 | 1 | 0.142857 | true | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
73dcf465a951f71e15b0e096ed7ce983e9914120 | 205 | py | Python | transactions.py | hendrikschneider/python-raft | 78e79fee2327e94c1f8f9352adf92dbad80c56e2 | [
"MIT"
] | null | null | null | transactions.py | hendrikschneider/python-raft | 78e79fee2327e94c1f8f9352adf92dbad80c56e2 | [
"MIT"
] | null | null | null | transactions.py | hendrikschneider/python-raft | 78e79fee2327e94c1f8f9352adf92dbad80c56e2 | [
"MIT"
] | null | null | null | import json
class Transaction(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def __repr__(self):
return "<Transaction: {}>".format(json.dumps(self.kwargs))
| 20.5 | 67 | 0.614634 | 22 | 205 | 5.363636 | 0.590909 | 0.254237 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.24878 | 205 | 9 | 68 | 22.777778 | 0.766234 | 0 | 0 | 0 | 0 | 0 | 0.086735 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.166667 | 0.166667 | 0.833333 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
fb495c6a4489de17e315812136bcf2880ea2a6cb | 96 | py | Python | tests/algebra/test_phi.py | arjunbharti/PyRival | c0c8eabdba8a213e008039c1a3d1f9127874832c | [
"Apache-2.0"
] | 1 | 2021-05-29T04:27:52.000Z | 2021-05-29T04:27:52.000Z | tests/algebra/test_phi.py | Mukundan314/PyRival | 49c32c1f41e0257bef0f6ac04c415d2b0ff89248 | [
"Apache-2.0"
] | null | null | null | tests/algebra/test_phi.py | Mukundan314/PyRival | 49c32c1f41e0257bef0f6ac04c415d2b0ff89248 | [
"Apache-2.0"
] | 1 | 2020-06-07T14:30:13.000Z | 2020-06-07T14:30:13.000Z | import pyrival.algebra
def test_phi(phi):
assert pyrival.algebra.phi(len(phi) - 1) == phi
| 16 | 51 | 0.697917 | 15 | 96 | 4.4 | 0.6 | 0.424242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0125 | 0.166667 | 96 | 5 | 52 | 19.2 | 0.8125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.333333 | false | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
fb54aa5ff36bbafff68ad625ffd787e4a6b2ca15 | 30 | py | Python | config/models.py | godwon2095/algorithm_project | c8140f75a14535592cac06a62c480be13c45d7c1 | [
"MIT"
] | null | null | null | config/models.py | godwon2095/algorithm_project | c8140f75a14535592cac06a62c480be13c45d7c1 | [
"MIT"
] | null | null | null | config/models.py | godwon2095/algorithm_project | c8140f75a14535592cac06a62c480be13c45d7c1 | [
"MIT"
] | null | null | null | from django.db.models import * | 30 | 30 | 0.8 | 5 | 30 | 4.8 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.1 | 30 | 1 | 30 | 30 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
fb84eb08bf79e58288f03e56118cb4d0a58b9e33 | 28 | py | Python | adet/structures/__init__.py | manusheoran/AdelaiDet_DA | 04f0843c6be8e436716783300abcba715d560853 | [
"BSD-2-Clause"
] | 2,597 | 2020-03-15T06:01:23.000Z | 2022-03-31T18:21:31.000Z | adet/structures/__init__.py | manusheoran/AdelaiDet_DA | 04f0843c6be8e436716783300abcba715d560853 | [
"BSD-2-Clause"
] | 467 | 2020-03-16T11:31:52.000Z | 2022-03-31T08:50:15.000Z | adet/structures/__init__.py | manusheoran/AdelaiDet_DA | 04f0843c6be8e436716783300abcba715d560853 | [
"BSD-2-Clause"
] | 584 | 2020-03-15T05:53:40.000Z | 2022-03-26T02:56:30.000Z | from .beziers import Beziers | 28 | 28 | 0.857143 | 4 | 28 | 6 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107143 | 28 | 1 | 28 | 28 | 0.96 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
fb9cf806824d01424153531869961e18ccb2739b | 73 | py | Python | cpp/neneshogi_cpp/__init__.py | select766/neneshogi | f355745844fb4c1add3e10083783d849be8ab80f | [
"MIT"
] | 6 | 2017-11-13T13:07:44.000Z | 2021-10-07T03:48:43.000Z | cpp/neneshogi_cpp/__init__.py | select766/neneshogi | f355745844fb4c1add3e10083783d849be8ab80f | [
"MIT"
] | null | null | null | cpp/neneshogi_cpp/__init__.py | select766/neneshogi | f355745844fb4c1add3e10083783d849be8ab80f | [
"MIT"
] | null | null | null | # to enable pycharm completion
from neneshogi_cpp.neneshogi_cpp import *
| 24.333333 | 41 | 0.835616 | 10 | 73 | 5.9 | 0.8 | 0.40678 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.123288 | 73 | 2 | 42 | 36.5 | 0.921875 | 0.383562 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
fba5a89bad069c8f853fb4198a8831a83ee2ed1e | 116 | py | Python | backend/backend/contact/tests.py | epm0dev/personal-website | 6090f7b1c82e36625939edaa47a846abc10e70a0 | [
"MIT"
] | 1 | 2020-12-29T15:52:37.000Z | 2020-12-29T15:52:37.000Z | backend/backend/contact/tests.py | epm0dev/personal-website | 6090f7b1c82e36625939edaa47a846abc10e70a0 | [
"MIT"
] | 2 | 2021-04-08T20:44:41.000Z | 2021-06-09T18:28:45.000Z | backend/backend/contact/tests.py | epm0dev/personal-website | 6090f7b1c82e36625939edaa47a846abc10e70a0 | [
"MIT"
] | null | null | null | from django.test import TestCase
class ContactFormModelTestCase(TestCase):
"""
TODO Docs
"""
pass
| 12.888889 | 41 | 0.663793 | 11 | 116 | 7 | 0.909091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 116 | 8 | 42 | 14.5 | 0.885057 | 0.077586 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 6 |
fbda92671ecda009408026d3fe4be856e615b23c | 21 | py | Python | apytl/__init__.py | anadolski/apytl | eab20a143e73aef2187c552021ed4e02744f39fb | [
"BSD-3-Clause"
] | 1 | 2022-01-21T18:36:20.000Z | 2022-01-21T18:36:20.000Z | esbmc_wr/bar/__init__.py | thalestas/esbmc-wr | b10521a1f36e3c8c08799c05bed710263d7c1df6 | [
"Apache-2.0"
] | 4 | 2021-06-01T20:50:46.000Z | 2022-01-04T04:30:24.000Z | apytl/__init__.py | anadolski/apytl | eab20a143e73aef2187c552021ed4e02744f39fb | [
"BSD-3-Clause"
] | 1 | 2022-01-31T03:47:33.000Z | 2022-01-31T03:47:33.000Z | from .bar import Bar
| 10.5 | 20 | 0.761905 | 4 | 21 | 4 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.190476 | 21 | 1 | 21 | 21 | 0.941176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
83704c3112563bf59ba4a3c225724f8bd4843e3e | 283 | py | Python | erudite/components/__init__.py | rerobins/rho_erudite | 154347e05da6030048840a060eaf5ae62ee8aec7 | [
"BSD-3-Clause"
] | null | null | null | erudite/components/__init__.py | rerobins/rho_erudite | 154347e05da6030048840a060eaf5ae62ee8aec7 | [
"BSD-3-Clause"
] | null | null | null | erudite/components/__init__.py | rerobins/rho_erudite | 154347e05da6030048840a060eaf5ae62ee8aec7 | [
"BSD-3-Clause"
] | null | null | null | from erudite.components.knowledge_provider import knowledge_provider
from erudite.components.search_handler import search_handler
from sleekxmpp.plugins.base import register_plugin
def load_components():
register_plugin(knowledge_provider)
register_plugin(search_handler)
| 28.3 | 68 | 0.858657 | 34 | 283 | 6.852941 | 0.441176 | 0.218884 | 0.180258 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.095406 | 283 | 9 | 69 | 31.444444 | 0.910156 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | true | 0 | 0.5 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
837e9ce14e14533cf59aa53bb71ea56443afdd25 | 38 | py | Python | deeppavlov/skills/pattern_matching_skill/__init__.py | ineersa/DeepPavlov | 8200bf9a0f0b378baad4ee0eb75b59453f516004 | [
"Apache-2.0"
] | 3 | 2020-04-16T04:25:10.000Z | 2021-05-07T23:04:43.000Z | deeppavlov/skills/pattern_matching_skill/__init__.py | ineersa/DeepPavlov | 8200bf9a0f0b378baad4ee0eb75b59453f516004 | [
"Apache-2.0"
] | 12 | 2020-01-28T22:14:04.000Z | 2022-02-10T00:10:17.000Z | deeppavlov/skills/pattern_matching_skill/__init__.py | ineersa/DeepPavlov | 8200bf9a0f0b378baad4ee0eb75b59453f516004 | [
"Apache-2.0"
] | 1 | 2022-02-08T14:41:28.000Z | 2022-02-08T14:41:28.000Z | from .pattern_matching_skill import *
| 19 | 37 | 0.842105 | 5 | 38 | 6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 38 | 1 | 38 | 38 | 0.882353 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
83863b2698b311b33fceeb045ed84f092af278db | 47 | py | Python | aiotruenas_client/__init__.py | MatthewFlamm/aiotruenas-client | 52977b2fcb044de7b580de0099d758d6fda45dc3 | [
"MIT"
] | null | null | null | aiotruenas_client/__init__.py | MatthewFlamm/aiotruenas-client | 52977b2fcb044de7b580de0099d758d6fda45dc3 | [
"MIT"
] | null | null | null | aiotruenas_client/__init__.py | MatthewFlamm/aiotruenas-client | 52977b2fcb044de7b580de0099d758d6fda45dc3 | [
"MIT"
] | null | null | null | from .websockets.machine import CachingMachine
| 23.5 | 46 | 0.87234 | 5 | 47 | 8.2 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085106 | 47 | 1 | 47 | 47 | 0.953488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
839c32104fff5672a5fd2bc247d880aebcb6104c | 175 | py | Python | src/prefect/environments/__init__.py | skyline-ai/prefect | 92430f2f91215d6c27d92ad67df67ccd639e587c | [
"Apache-2.0"
] | null | null | null | src/prefect/environments/__init__.py | skyline-ai/prefect | 92430f2f91215d6c27d92ad67df67ccd639e587c | [
"Apache-2.0"
] | null | null | null | src/prefect/environments/__init__.py | skyline-ai/prefect | 92430f2f91215d6c27d92ad67df67ccd639e587c | [
"Apache-2.0"
] | null | null | null | from prefect.environments.execution import (
Environment,
LocalEnvironment,
RemoteEnvironment,
)
from prefect.environments.execution.cloud import CloudEnvironment
| 25 | 65 | 0.805714 | 15 | 175 | 9.4 | 0.666667 | 0.156028 | 0.326241 | 0.453901 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.137143 | 175 | 6 | 66 | 29.166667 | 0.933775 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
83c66ef1afdb8ae7090d0075b73b2441f50a698c | 21 | py | Python | bareditor/__init__.py | mrakitin/bareditor | be944acb5d65f06ea9b9dd14eeecd1f89299cecf | [
"BSD-3-Clause"
] | null | null | null | bareditor/__init__.py | mrakitin/bareditor | be944acb5d65f06ea9b9dd14eeecd1f89299cecf | [
"BSD-3-Clause"
] | null | null | null | bareditor/__init__.py | mrakitin/bareditor | be944acb5d65f06ea9b9dd14eeecd1f89299cecf | [
"BSD-3-Clause"
] | 1 | 2019-10-13T01:42:16.000Z | 2019-10-13T01:42:16.000Z | import wx
print('ok') | 10.5 | 11 | 0.714286 | 4 | 21 | 3.75 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.095238 | 21 | 2 | 11 | 10.5 | 0.789474 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0.5 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 6 |
83ceff064e4eba61a2d9c84aefc5071dfe535f24 | 6,062 | py | Python | tests/api/v1/test_export_control.py | redhat-cip/dci-control-server | 6dee30e7b8770fde2466f2b09554d299a3f3db4d | [
"Apache-2.0"
] | 17 | 2016-09-02T09:21:29.000Z | 2021-09-27T11:33:58.000Z | tests/api/v1/test_export_control.py | redhat-cip/dci-control-server | 6dee30e7b8770fde2466f2b09554d299a3f3db4d | [
"Apache-2.0"
] | 80 | 2015-12-09T09:29:26.000Z | 2021-01-06T08:24:22.000Z | tests/api/v1/test_export_control.py | redhat-cip/dci-control-server | 6dee30e7b8770fde2466f2b09554d299a3f3db4d | [
"Apache-2.0"
] | 10 | 2015-09-29T21:34:53.000Z | 2021-09-27T11:34:01.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from dci.common import utils
from dci.stores.swift import Swift
SWIFT = 'dci.stores.swift.Swift'
# team_user_id is subscribing to topic_user_id
def test_topics_export_control_true(user, epm, team_user_id, topic_user_id):
topic = epm.get('/api/v1/topics/%s' % topic_user_id).data['topic']
res = epm.post('/api/v1/products/%s/teams' % topic['product_id'],
data={'team_id': team_user_id})
assert res.status_code == 201
epm.put('/api/v1/topics/%s' % topic_user_id,
data={'export_control': True},
headers={'If-match': topic['etag']})
topic = epm.get('/api/v1/topics/%s' % topic_user_id).data['topic']
assert topic['export_control'] is True
# team_user_id is associated to the product and the topic is exported
# then it should have access to the topic's components
assert user.get('/api/v1/topics/%s/components' % topic_user_id).status_code == 200 # noqa
def test_topics_export_control_false(user, admin, team_user_id, topic_user_id):
topic = admin.get('/api/v1/topics/%s' % topic_user_id).data['topic']
assert topic['export_control'] is False
assert user.get('/api/v1/topics/%s/components' % topic_user_id).status_code == 200 # noqa
# team_user_id is no associated to the product nor to the topic
admin.delete('/api/v1/topics/%s/teams/%s' % (topic_user_id, team_user_id))
assert user.get('/api/v1/topics/%s/components' % topic_user_id).status_code == 401 # noqa
def test_components_export_control_true(user, epm, team_user_id,
topic_user_id, components_user_ids):
topic = epm.get('/api/v1/topics/%s' % topic_user_id).data['topic']
res = epm.post('/api/v1/products/%s/teams' % topic['product_id'],
data={'team_id': team_user_id})
assert res.status_code == 201
epm.put('/api/v1/topics/%s' % topic_user_id,
data={'export_control': True},
headers={'If-match': topic['etag']})
topic = epm.get('/api/v1/topics/%s' % topic_user_id).data['topic']
assert topic['export_control'] is True
with mock.patch(SWIFT, spec=Swift) as mock_swift:
mockito = mock.MagicMock()
mockito.get.return_value = ["test", six.StringIO("lollollel")]
head_result = {
'etag': utils.gen_etag(),
'content-type': "stream",
'content-length': 1
}
mockito.head.return_value = head_result
mock_swift.return_value = mockito
url = '/api/v1/components/%s/files' % components_user_ids[0]
c_file = epm.post(url, data='lol')
c_file_1_id = c_file.data['component_file']['id']
        # team_user_id is not subscribed to topic_user_id, but it is
        # associated with the product, so it can access the topic's components
assert user.get('/api/v1/components/%s' % components_user_ids[0]).status_code == 200 # noqa
assert user.get('/api/v1/components/%s/files' % components_user_ids[0]).status_code == 200 # noqa
assert user.get('/api/v1/components/%s/files/%s' % (components_user_ids[0], c_file_1_id)).status_code == 200 # noqa
assert user.get('/api/v1/components/%s/files/%s/content' % (components_user_ids[0], c_file_1_id)).status_code == 200 # noqa
def test_components_export_control_false(user, epm, team_user_id,
topic_user_id, components_user_ids): # noqa
topic = epm.get('/api/v1/topics/%s' % topic_user_id).data['topic']
res = epm.post('/api/v1/products/%s/teams' % topic['product_id'],
data={'team_id': team_user_id})
assert res.status_code == 201
with mock.patch(SWIFT, spec=Swift) as mock_swift:
mockito = mock.MagicMock()
mockito.get.return_value = ["test", six.StringIO("lollollel")]
head_result = {
'etag': utils.gen_etag(),
'content-type': "stream",
'content-length': 1
}
mockito.head.return_value = head_result
mock_swift.return_value = mockito
url = '/api/v1/components/%s/files' % components_user_ids[0]
c_file = epm.post(url, data='lol')
c_file_1_id = c_file.data['component_file']['id']
assert topic['export_control'] is False
assert user.get('/api/v1/components/%s' % components_user_ids[0]).status_code == 200 # noqa
assert user.get('/api/v1/components/%s/files' % components_user_ids[0]).status_code == 200 # noqa
assert user.get('/api/v1/components/%s/files/%s' % (components_user_ids[0], c_file_1_id)).status_code == 200 # noqa
assert user.get('/api/v1/components/%s/files/%s/content' % (components_user_ids[0], c_file_1_id)).status_code == 200 # noqa
        # team_user_id is associated with the product but not with the topic;
        # since the topic is not exported, the user does not have access
epm.delete('/api/v1/topics/%s/teams/%s' % (topic_user_id, team_user_id)) # noqa
assert user.get('/api/v1/components/%s' % components_user_ids[0]).status_code == 401 # noqa
assert user.get('/api/v1/components/%s/files' % components_user_ids[0]).status_code == 401 # noqa
assert user.get('/api/v1/components/%s/files/%s' % (components_user_ids[0], c_file_1_id)).status_code == 401 # noqa
assert user.get('/api/v1/components/%s/files/%s/content' % (components_user_ids[0], c_file_1_id)).status_code == 401 # noqa
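# A minimal sketch (not part of the original module) showing how the Swift
# mock setup duplicated in the tests above could be factored into a pytest
# fixture; "mock_swift_store" is a hypothetical name.
import pytest


@pytest.fixture
def mock_swift_store():
    with mock.patch(SWIFT, spec=Swift) as mock_swift:
        mockito = mock.MagicMock()
        mockito.get.return_value = ["test", six.StringIO("lollollel")]
        mockito.head.return_value = {
            'etag': utils.gen_etag(),
            'content-type': "stream",
            'content-length': 1
        }
        mock_swift.return_value = mockito
        yield mockito  # tests receive the configured store mock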
| 48.111111 | 132 | 0.658364 | 907 | 6,062 | 4.190739 | 0.16538 | 0.052092 | 0.044199 | 0.063141 | 0.790318 | 0.768482 | 0.756906 | 0.746383 | 0.730597 | 0.714286 | 0 | 0.023366 | 0.202243 | 6,062 | 125 | 133 | 48.496 | 0.762614 | 0.187067 | 0 | 0.759494 | 0 | 0 | 0.224268 | 0.129937 | 0 | 0 | 0 | 0 | 0.278481 | 1 | 0.050633 | false | 0 | 0.050633 | 0 | 0.101266 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
83d024dfa0ce33ccf4bf0fc4f6201a88e3798681 | 1,139 | py | Python | app/bin/dltk/core/logging/brancher.py | splunk/deep-learning-toolkit | 84f9c978d9859a96f6ba566737a5c7102738d13c | [
"Apache-2.0"
] | 11 | 2020-10-13T05:27:59.000Z | 2021-09-23T02:56:32.000Z | app/bin/dltk/core/logging/brancher.py | splunk/deep-learning-toolkit | 84f9c978d9859a96f6ba566737a5c7102738d13c | [
"Apache-2.0"
] | 48 | 2020-10-15T09:53:36.000Z | 2021-07-05T15:33:24.000Z | app/bin/dltk/core/logging/brancher.py | splunk/deep-learning-toolkit | 84f9c978d9859a96f6ba566737a5c7102738d13c | [
"Apache-2.0"
] | 4 | 2020-12-04T08:51:35.000Z | 2022-03-27T09:42:20.000Z | import logging
__all__ = ["BranchLogger"]
class BranchLogger(object):
logger_a = None
logger_b = None
def __init__(self, logger_a, logger_b):
self.logger_a = logger_a
self.logger_b = logger_b
def log(self, level, message, *args, **kwargs):
self.logger_a.log(
level,
message,
*args,
**kwargs
)
return self.logger_b.log(
level,
message,
*args,
**kwargs
)
def debug(self, message, *args, **kwargs):
return self.log(logging.DEBUG, message, *args, **kwargs)
def info(self, message, *args, **kwargs):
return self.log(logging.INFO, message, *args, **kwargs)
def warning(self, message, *args, **kwargs):
return self.log(logging.WARNING, message, *args, **kwargs)
# Alias warn to warning
warn = warning
def error(self, message, *args, **kwargs):
return self.log(logging.ERROR, message, *args, **kwargs)
def critical(self, message, *args, **kwargs):
return self.log(logging.CRITICAL, message, *args, **kwargs)
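# Minimal usage sketch (illustrative only, not part of the original module):
# fan a single record out to two independently configured loggers.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    branch = BranchLogger(logging.getLogger("a"), logging.getLogger("b"))
    branch.info("logged through both %s and %s", "a", "b")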
| 25.311111 | 67 | 0.574188 | 130 | 1,139 | 4.892308 | 0.207692 | 0.224843 | 0.347484 | 0.216981 | 0.416667 | 0.322327 | 0.322327 | 0.322327 | 0 | 0 | 0 | 0 | 0.29763 | 1,139 | 44 | 68 | 25.886364 | 0.795 | 0.018437 | 0 | 0.25 | 0 | 0 | 0.010753 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.21875 | false | 0 | 0.03125 | 0.15625 | 0.5625 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
83e564d54cee4d6f27f4fc3b4057304dfd528cde | 121 | py | Python | module/Interface/__init__.py | Phosphophyllite2018/Phosphophyllite | 72602b24db012d97e0c9c245c1fa914e7442c2ff | [
"MIT"
] | null | null | null | module/Interface/__init__.py | Phosphophyllite2018/Phosphophyllite | 72602b24db012d97e0c9c245c1fa914e7442c2ff | [
"MIT"
] | null | null | null | module/Interface/__init__.py | Phosphophyllite2018/Phosphophyllite | 72602b24db012d97e0c9c245c1fa914e7442c2ff | [
"MIT"
] | null | null | null | from . import BlogInterface
from . import ArticleInterface
from . import MessageInterface
from . import MarkdownInterface | 30.25 | 31 | 0.842975 | 12 | 121 | 8.5 | 0.5 | 0.392157 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.123967 | 121 | 4 | 31 | 30.25 | 0.962264 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
f7c42ae785d8e74b8ab6e6bf63efb4f4928d67a8 | 170 | py | Python | okonf/__init__.py | hoh/Okonf | 26b3629b20504bc1c4ee51f054df6a59e54427ae | [
"Apache-2.0"
] | 1 | 2018-03-20T14:55:41.000Z | 2018-03-20T14:55:41.000Z | okonf/__init__.py | hoh/Okonf | 26b3629b20504bc1c4ee51f054df6a59e54427ae | [
"Apache-2.0"
] | null | null | null | okonf/__init__.py | hoh/Okonf | 26b3629b20504bc1c4ee51f054df6a59e54427ae | [
"Apache-2.0"
] | null | null | null | import okonf.connectors
import okonf.facts
import okonf.utils
from okonf.facts.multiple import Sequence, Collection
assert Sequence
assert Collection
assert okonf.utils
| 18.888889 | 53 | 0.852941 | 23 | 170 | 6.304348 | 0.434783 | 0.227586 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.105882 | 170 | 8 | 54 | 21.25 | 0.953947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.428571 | 1 | 0 | true | 0 | 0.571429 | 0 | 0.571429 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
f7d3d92d3403b5c886e375192eeee54c8bb0155c | 25,556 | py | Python | mrpy/spatial_operators/ctr_poly/2nd_order_ctr_finite_diff/divergence.py | marc-nguessan/mrpy | 6fb0bce485234a45bb863f71bc2bdf0a22014de3 | [
"BSD-3-Clause"
] | 2 | 2020-01-06T10:48:44.000Z | 2020-01-09T20:07:08.000Z | mrpy/spatial_operators/ctr_poly/2nd_order_ctr_finite_diff/divergence.py | marc-nguessan/mrpy | 6fb0bce485234a45bb863f71bc2bdf0a22014de3 | [
"BSD-3-Clause"
] | 1 | 2020-01-09T20:08:50.000Z | 2020-01-09T20:11:20.000Z | mrpy/spatial_operators/ctr_poly/2nd_order_ctr_finite_diff/divergence.py | marc-nguessan/mrpy | 6fb0bce485234a45bb863f71bc2bdf0a22014de3 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function, division
#!!!!!!!! NEEDS TO BE UPDATED TO TAKE THE SAME FORM AS THE OPERATORS IN HAAR !!!!!!!!
"""This module is used to compute the divergence operator in the x-direction.
The procedure "create_matrix" returns the matrix representing the linear
combination of this operation on a cartesian grid representation of a variable.
Since the spatial operator depends on the specific boundary conditions applied
to the computed variable, this matrix depends on the boundary conditions.
The procedure mesh.bc_compatbile_local_indexes is used to return the right
indexes depending on the boundary conditions. It returns "None" if there is no
real node corresponding to the input indexes. Since we loop on the real leaves,
if this procedure returns "None" at a specific boundary, we know in which part
of the space we are.
The procedure "create_bc_scalar" returns an array of the values needed to
complete the computation of the spatial operation on the meshes located at the boundary
of the domain. We assume that the type and the values of the variable at the
boundray do not change with time, so that this array is built with the type of
boundary condition applied to the varialbe computed, and the values at the
north, south, east and west boundaries of the variable.
...
...
"""
import petsc4py.PETSc as petsc
from six.moves import range
import config as cfg
from mrpy.mr_utils import mesh
from mrpy.mr_utils import op
import numpy as np
import math
import importlib
from .matrix_aux import matrix_add
#!!!!!!! remember to add an mr_bc_scalar !!!!!!!!
def create_matrix(tree, axis, vec_aux=None):
matrix = petsc.Mat().create()
number_of_rows = tree.number_of_leaves
size_row = (number_of_rows, number_of_rows)
size_col = (number_of_rows, number_of_rows)
matrix.setSizes((size_row, size_col))
matrix.setUp()
# matrix = np.zeros(shape=(number_of_rows, number_of_rows), dtype=np.float)
if vec_aux is None:
vec_aux = petsc.Vec().create()
vec_aux.setSizes(number_of_rows)
vec_aux.set(1)
if axis == 0:
for row in range(number_of_rows):
index = tree.tree_leaves[row]
level = tree.nlevel[index]
i = tree.nindex_x[index]
j = tree.nindex_y[index]
k = tree.nindex_z[index]
dx = tree.ndx[index]
dy = tree.ndy[index]
dz = tree.ndz[index]
# left flux
if mesh.bc_compatible_local_indexes(tree, level, i-1, j, k) is not None:
i_left, j_left, k_left = mesh.bc_compatible_local_indexes(tree, level, i-1, j, k)
index_left = mesh.z_curve_index(tree.dimension, level, i_left, j_left, k_left)
if index_left in tree.tree_nodes and tree.nisleaf[index_left] \
or index_left not in tree.tree_nodes:
# the finest level for the left flux is the node's level
if tree.dimension == 2:
matrix_add(tree, matrix, row, -(dy)/(2*(dx*dy)), level, i, j, k)
matrix_add(tree, matrix, row, -(dy)/(2*(dx*dy)), level, i_left, j_left, k_left)
elif tree.dimension == 3:
matrix_add(tree, matrix, row, -(dy*dz)/(2*(dx*dy*dz)), level, i, j, k)
matrix_add(tree, matrix, row, -(dy*dz)/(2*(dx*dy*dz)), level, i_left, j_left, k_left)
else:
# the finest level for the left flux is the level of the node's children
#if tree.dimension == 1:
# matrix_add(tree, matrix, row, -1./((dx/2.)*2.), level+1, 2*i, 2*j, 2*k)
# matrix_add(tree, matrix, row, -1./((dx/2.)*2.), level+1, 2*i_left+1, 2*j, 2*k)
                    #REVISIT DIMENSION 1 !!!
if tree.dimension == 2:
for n in range(2):
matrix_add(tree, matrix, row, -(dy/2)/(2*(dx*dy)), level+1, 2*i, 2*j+n, 2*k)
matrix_add(tree, matrix, row, -(dy/2)/(2*(dx*dy)), level+1, 2*i_left+1, 2*j+n, 2*k)
elif tree.dimension == 3:
for o in range(2):
for n in range(2):
matrix_add(tree, matrix, row, -((dy/2)*(dz/2))/(2*(dx*dy*dz)), level+1, 2*i, 2*j+n, 2*k+o)
matrix_add(tree, matrix, row, -((dy/2)*(dz/2))/(2*(dx*dy*dz)), level+1, 2*i_left+1, 2*j+n, 2*k+o)
elif tree.bc["west"][0] == "dirichlet":
#the left flux depends only on the boundary condition scalar
pass
elif tree.bc["west"][0] == "neumann":
# the finest level for the left flux is the node's level; this node
# receives a second contribution because of the boundary condition
if tree.dimension == 2:
matrix_add(tree, matrix, row, -(dy)/((dx*dy)), level, i, j, k)
elif tree.dimension == 3:
matrix_add(tree, matrix, row, -(dy*dz)/((dx*dy*dz)), level, i, j, k)
# right flux
if mesh.bc_compatible_local_indexes(tree, level, i+1, j, k) is not None:
i_right, j_right, k_right = mesh.bc_compatible_local_indexes(tree, level, i+1, j, k)
index_right = mesh.z_curve_index(tree.dimension, level, i_right, j_right, k_right)
if index_right in tree.tree_nodes and tree.nisleaf[index_right] \
or index_right not in tree.tree_nodes:
# the finest level for the right flux is the node's level
if tree.dimension == 2:
matrix_add(tree, matrix, row, dy/(2*(dx*dy)), level, i, j, k)
matrix_add(tree, matrix, row, dy/(2*(dx*dy)), level, i_right, j_right, k_right)
elif tree.dimension == 3:
matrix_add(tree, matrix, row, dy*dz/(2*(dx*dy*dz)), level, i, j, k)
matrix_add(tree, matrix, row, dy*dz/(2*(dx*dy*dz)), level, i_right, j_right, k_right)
else:
# the finest level for the right flux is the level of the node's children
#if tree.dimension == 1:
# matrix_add(tree, matrix, row, 1./((dx/2.)*2.), level+1, 2*i+1, 2*j, 2*k)
# matrix_add(tree, matrix, row, 1./((dx/2.)*2.), level+1, 2*i_right, 2*j, 2*k)
                    #REVISIT DIMENSION 1 !!!
if tree.dimension == 2:
for n in range(2):
matrix_add(tree, matrix, row, (dy/2)/(2*(dx*dy)), level+1, 2*i+1, 2*j+n, 2*k)
matrix_add(tree, matrix, row, (dy/2)/(2*(dx*dy)), level+1, 2*i_right, 2*j+n, 2*k)
elif tree.dimension == 3:
for o in range(2):
for n in range(2):
matrix_add(tree, matrix, row, (dy/2)*(dz/2)/(2*(dx*dy*dz)), level+1, 2*i+1, 2*j+n, 2*k+o)
matrix_add(tree, matrix, row, (dy/2)*(dz/2)/(2*(dx*dy*dz)), level+1, 2*i_right, 2*j+n, 2*k+o)
elif tree.bc["east"][0] == "dirichlet":
#the right flux depends only on the boundary condition scalar
pass
elif tree.bc["east"][0] == "neumann":
# the finest level for the right flux is the node's level; this node
# receives a second contribution because of the boundary condition
if tree.dimension == 2:
matrix_add(tree, matrix, row, (dy)/((dx*dy)), level, i, j, k)
elif tree.dimension == 3:
matrix_add(tree, matrix, row, (dy*dz)/((dx*dy*dz)), level, i, j, k)
matrix.assemble()
return matrix
elif axis == 1:
for row in range(number_of_rows):
index = tree.tree_leaves[row]
i = tree.nindex_x[index]
j = tree.nindex_y[index]
k = tree.nindex_z[index]
level = tree.nlevel[index]
dx = tree.ndx[index]
dy = tree.ndy[index]
dz = tree.ndz[index]
# left flux
if mesh.bc_compatible_local_indexes(tree, level, i, j-1, k) is not None:
i_left, j_left, k_left = mesh.bc_compatible_local_indexes(tree, level, i, j-1, k)
index_left = mesh.z_curve_index(tree.dimension, level, i_left, j_left, k_left)
if index_left in tree.tree_nodes and tree.nisleaf[index_left] \
or index_left not in tree.tree_nodes:
# the finest level for the left flux is the node's level
if tree.dimension == 2:
matrix_add(tree, matrix, row, -(dx)/(2*(dx*dy)), level, i, j, k)
matrix_add(tree, matrix, row, -(dx)/(2*(dx*dy)), level, i_left, j_left, k_left)
elif tree.dimension == 3:
matrix_add(tree, matrix, row, -(dx*dz)/(2*(dx*dy*dz)), level, i, j, k)
matrix_add(tree, matrix, row, -(dx*dz)/(2*(dx*dy*dz)), level, i_left, j_left, k_left)
else:
# the finest level for the left flux is the level of the node's children
#if tree.dimension == 1:
# matrix_add(tree, matrix, row, -1./((dy/2.)*2.), level+1, 2*i, 2*j, 2*k)
# matrix_add(tree, matrix, row, -1./((dy/2.)*2.), level+1, 2*i, 2*j_left+1, 2*k)
                    #REVISIT DIMENSION 1 !!!
if tree.dimension == 2:
for m in range(2):
matrix_add(tree, matrix, row, -(dx/2)/(2*(dx*dy)), level+1, 2*i+m, 2*j, 2*k)
matrix_add(tree, matrix, row, -(dx/2)/(2*(dx*dy)), level+1, 2*i+m, 2*j_left+1, 2*k)
elif tree.dimension == 3:
for o in range(2):
for m in range(2):
matrix_add(tree, matrix, row, -(dx/2)*(dz/2)/(2*(dx*dy*dz)), level+1, 2*i+m, 2*j, 2*k+o)
matrix_add(tree, matrix, row, -(dx/2)*(dz/2)/(2*(dx*dy*dz)), level+1, 2*i+m, 2*j_left+1, 2*k+o)
elif tree.bc["south"][0] == "dirichlet":
#the left flux depends only on the boundary condition scalar
pass
elif tree.bc["south"][0] == "neumann":
# the finest level for the left flux is the node's level; this node
# receives a second contribution because of the boundary condition
if tree.dimension == 2:
matrix_add(tree, matrix, row, -(dx)/((dx*dy)), level, i, j, k)
elif tree.dimension == 3:
matrix_add(tree, matrix, row, -(dx*dz)/((dx*dy*dz)), level, i, j, k)
# right flux
if mesh.bc_compatible_local_indexes(tree, level, i, j+1, k) is not None:
i_right, j_right, k_right = mesh.bc_compatible_local_indexes(tree, level, i, j+1, k)
index_right = mesh.z_curve_index(tree.dimension, level, i_right, j_right, k_right)
if index_right in tree.tree_nodes and tree.nisleaf[index_right] \
or index_right not in tree.tree_nodes:
# the finest level for the right flux is the node's level
if tree.dimension == 2:
matrix_add(tree, matrix, row, dx/(2*(dx*dy)), level, i, j, k)
matrix_add(tree, matrix, row, dx/(2*(dx*dy)), level, i_right, j_right, k_right)
elif tree.dimension == 3:
matrix_add(tree, matrix, row, dx*dz/(2*(dx*dy*dz)), level, i, j, k)
matrix_add(tree, matrix, row, dx*dz/(2*(dx*dy*dz)), level, i_right, j_right, k_right)
else:
# the finest level for the right flux is the level of the node's children
#if tree.dimension == 1:
# matrix_add(tree, matrix, row, 1./((dy/2.)*2.), level+1, 2*i, 2*j+1, 2*k)
# matrix_add(tree, matrix, row, 1./((dy/2.)*2.), level+1, 2*i, 2*j_right, 2*k)
                    #REVISIT DIMENSION 1 !!!
if tree.dimension == 2:
for m in range(2):
matrix_add(tree, matrix, row, (dx/2)/(2*(dx*dy)), level+1, 2*i+m, 2*j+1, 2*k)
matrix_add(tree, matrix, row, (dx/2)/(2*(dx*dy)), level+1, 2*i+m, 2*j_right, 2*k)
elif tree.dimension == 3:
for o in range(2):
for m in range(2):
matrix_add(tree, matrix, row, (dx/2)*(dz/2)/(2*(dx*dy*dz)), level+1, 2*i+m, 2*j+1, 2*k+o)
matrix_add(tree, matrix, row, (dx/2)*(dz/2)/(2*(dx*dy*dz)), level+1, 2*i+m, 2*j_right, 2*k+o)
elif tree.bc["north"][0] == "dirichlet":
#the right flux depends only on the boundary condition scalar
pass
elif tree.bc["north"][0] == "neumann":
                # the finest level for the right flux is the node's level; this node
# receives a second contribution because of the boundary condition
if tree.dimension == 2:
matrix_add(tree, matrix, row, (dx)/((dx*dy)), level, i, j, k)
elif tree.dimension == 3:
matrix_add(tree, matrix, row, (dx*dz)/((dx*dy*dz)), level, i, j, k)
matrix.assemble()
return matrix
elif axis == 2:
for row in range(number_of_rows):
index = tree.tree_leaves[row]
i = tree.nindex_x[index]
j = tree.nindex_y[index]
k = tree.nindex_z[index]
level = tree.nlevel[index]
dx = tree.ndx[index]
dy = tree.ndy[index]
dz = tree.ndz[index]
# left flux
if mesh.bc_compatible_local_indexes(tree, level, i, j, k-1) is not None:
i_left, j_left, k_left = mesh.bc_compatible_local_indexes(tree, level, i, j, k-1)
index_left = mesh.z_curve_index(tree.dimension, level, i_left, j_left, k_left)
if index_left in tree.tree_nodes and tree.nisleaf[index_left] \
or index_left not in tree.tree_nodes:
# the finest level for the left flux is the node's level
matrix_add(tree, matrix, row, -(dx*dy)/(2*(dx*dy*dz)), level, i, j, k)
matrix_add(tree, matrix, row, -(dx*dy)/(2*(dx*dy*dz)), level, i_left, j_left, k_left)
else:
# the finest level for the left flux is the level of the node's children
for n in range(2):
for m in range(2):
matrix_add(tree, matrix, row, -(dx/2)*(dy/2)/(2*(dx*dy*dz)), level+1, 2*i+m, 2*j+n, 2*k)
matrix_add(tree, matrix, row, -(dx/2)*(dy/2)/(2*(dx*dy*dz)), level+1, 2*i+m, 2*j+n, 2*k_left+1)
elif tree.bc["back"][0] == "dirichlet":
#the left flux depends only on the boundary condition scalar
pass
elif tree.bc["back"][0] == "neumann":
# the finest level for the left flux is the node's level; this node
# receives a second contribution because of the boundary condition
                matrix_add(tree, matrix, row, -(dx*dy)/((dx*dy*dz)), level, i, j, k)
# right flux
if mesh.bc_compatible_local_indexes(tree, level, i, j, k+1) is not None:
i_right, j_right, k_right = mesh.bc_compatible_local_indexes(tree, level, i, j, k+1)
index_right = mesh.z_curve_index(tree.dimension, level, i_right, j_right, k_right)
if index_right in tree.tree_nodes and tree.nisleaf[index_right] \
or index_right not in tree.tree_nodes:
# the finest level for the right flux is the node's level
matrix_add(tree, matrix, row, (dx*dy)/(2*(dx*dy*dz)), level, i, j, k)
matrix_add(tree, matrix, row, (dx*dy)/(2*(dx*dy*dz)), level, i_right, j_right, k_right)
else:
# the finest level for the right flux is the level of the node's children
for n in range(2):
for m in range(2):
matrix_add(tree, matrix, row, (dx/2)*(dy/2)/(2*(dx*dy*dz)), level+1, 2*i+m, 2*j+n, 2*k+1)
matrix_add(tree, matrix, row, (dx/2)*(dy/2)/(2*(dx*dy*dz)), level+1, 2*i+m, 2*j+n, 2*k_right)
elif tree.bc["forth"][0] == "dirichlet":
#the right flux depends only on the boundary condition scalar
pass
elif tree.bc["forth"][0] == "neumann":
                # the finest level for the right flux is the node's level; this node
# receives a second contribution because of the boundary condition
                matrix_add(tree, matrix, row, (dx*dy)/((dx*dy*dz)), level, i, j, k)
matrix.assemble()
return matrix
def create_bc_scalar(tree, axis, north=None, south=None, east=None, west=None, forth=None, back=None):
scalar = petsc.Vec().create()
number_of_rows = tree.number_of_leaves
scalar.setSizes(number_of_rows, number_of_rows)
scalar.setUp()
if north is None and south is None and east is None and west is None and forth is None and back is None:
north = tree.bc["north"][1]
south = tree.bc["south"][1]
west = tree.bc["west"][1]
east = tree.bc["east"][1]
forth = tree.bc["forth"][1]
back = tree.bc["back"][1]
if axis == 0:
for row in range(number_of_rows):
index = tree.tree_leaves[row]
level = tree.nlevel[index]
i = tree.nindex_x[index]
j = tree.nindex_y[index]
k = tree.nindex_z[index]
dx = tree.ndx[index]
dy = tree.ndy[index]
dz = tree.ndz[index]
#left flux
if i == 0:
if tree.bc["west"][0] == "periodic":
pass
elif tree.bc["west"][0] == "dirichlet":
if tree.dimension == 2:
coords = mesh.boundary_coords(tree, "west", level, 0, j, k)
scalar.setValue(row, -west(coords)*dy/(dx*dy), True)
elif tree.dimension == 3:
coords = mesh.boundary_coords(tree, "west", level, 0, j, k)
scalar.setValue(row, -west(coords)*dy*dz/(dx*dy*dz), True)
elif tree.bc["west"][0] == "neumann":
if tree.dimension == 2:
coords = mesh.boundary_coords(tree, "west", level, 0, j, k)
scalar.setValue(row, -(west(coords)*dx/2)*dy/(dx*dy), True)
elif tree.dimension == 3:
coords = mesh.boundary_coords(tree, "west", level, 0, j, k)
scalar.setValue(row, -(west(coords)*dx/2)*dy*dz/(dx*dy*dz), True)
#right flux
if i == 2**level-1:
if tree.bc["east"][0] == "periodic":
pass
elif tree.bc["east"][0] == "dirichlet":
if tree.dimension == 2:
coords = mesh.boundary_coords(tree, "east", level, 0, j, k)
scalar.setValue(row, east(coords)*dy/(dx*dy), True)
elif tree.dimension == 3:
coords = mesh.boundary_coords(tree, "east", level, 0, j, k)
scalar.setValue(row, east(coords)*dy*dz/(dx*dy*dz), True)
elif tree.bc["east"][0] == "neumann":
if tree.dimension == 2:
coords = mesh.boundary_coords(tree, "east", level, 0, j, k)
scalar.setValue(row, (east(coords)*dx/2)*dy/(dx*dy), True)
elif tree.dimension == 3:
coords = mesh.boundary_coords(tree, "east", level, 0, j, k)
scalar.setValue(row, (east(coords)*dx/2)*dy*dz/(dx*dy*dz), True)
return scalar
elif axis == 1:
for row in range(number_of_rows):
index = tree.tree_leaves[row]
level = tree.nlevel[index]
i = tree.nindex_x[index]
j = tree.nindex_y[index]
k = tree.nindex_z[index]
dx = tree.ndx[index]
dy = tree.ndy[index]
dz = tree.ndz[index]
#left flux
if j == 0:
if tree.bc["south"][0] == "periodic":
pass
elif tree.bc["south"][0] == "dirichlet":
if tree.dimension == 2:
coords = mesh.boundary_coords(tree, "south", level, i, 0, k)
scalar.setValue(row, -south(coords)*dx/(dx*dy), True)
elif tree.dimension == 3:
coords = mesh.boundary_coords(tree, "south", level, i, 0, k)
scalar.setValue(row, -south(coords)*dx*dz/(dx*dy*dz), True)
elif tree.bc["south"][0] == "neumann":
if tree.dimension == 2:
coords = mesh.boundary_coords(tree, "south", level, i, 0, k)
scalar.setValue(row, -(south(coords)*dy/2)*dx/(dx*dy), True)
elif tree.dimension == 3:
coords = mesh.boundary_coords(tree, "south", level, i, 0, k)
scalar.setValue(row, -(south(coords)*dy/2)*dx*dz/(dx*dy*dz), True)
#right flux
if j == 2**level-1:
if tree.bc["north"][0] == "periodic":
pass
elif tree.bc["north"][0] == "dirichlet":
if tree.dimension == 2:
coords = mesh.boundary_coords(tree, "north", level, i, 0, k)
scalar.setValue(row, north(coords)*dx/(dx*dy), True)
elif tree.dimension == 3:
coords = mesh.boundary_coords(tree, "north", level, i, 0, k)
scalar.setValue(row, north(coords)*dx*dz/(dx*dy*dz), True)
elif tree.bc["north"][0] == "neumann":
if tree.dimension == 2:
coords = mesh.boundary_coords(tree, "north", level, i, 0, k)
scalar.setValue(row, (north(coords)*dy/2)*dx/(dx*dy), True)
elif tree.dimension == 3:
coords = mesh.boundary_coords(tree, "north", level, i, 0, k)
scalar.setValue(row, (north(coords)*dy/2)*dx*dz/(dx*dy*dz), True)
return scalar
elif axis == 2:
for row in range(number_of_rows):
index = tree.tree_leaves[row]
level = tree.nlevel[index]
i = tree.nindex_x[index]
j = tree.nindex_y[index]
k = tree.nindex_z[index]
dx = tree.ndx[index]
dy = tree.ndy[index]
dz = tree.ndz[index]
#left flux
if k == 0:
if tree.bc["back"][0] == "periodic":
pass
elif tree.bc["back"][0] == "dirichlet":
coords = mesh.boundary_coords(tree, "back", level, i, j, 0)
scalar.setValue(row, -back(coords)*dx*dy/(dx*dy*dz), True)
elif tree.bc["back"][0] == "neumann":
coords = mesh.boundary_coords(tree, "back", level, i, j, 0)
scalar.setValue(row, -(back(coords)*dz/2)*dx*dy/(dx*dy*dz), True)
#right flux
if k == 2**level-1:
if tree.bc["forth"][0] == "periodic":
pass
elif tree.bc["forth"][0] == "dirichlet":
coords = mesh.boundary_coords(tree, "forth", level, i, j, 0)
scalar.setValue(row, forth(coords)*dx*dy/(dx*dy*dz), True)
elif tree.bc["forth"][0] == "neumann":
coords = mesh.boundary_coords(tree, "forth", level, i, j, 0)
scalar.setValue(row, (forth(coords)*dz/2)*dx*dy/(dx*dy*dz), True)
return scalar
if __name__ == "__main__":
output_module = importlib.import_module(cfg.output_module_name)
tree = mesh.create_new_tree(cfg.dimension, cfg.min_level, cfg.max_level, cfg.stencil_graduation, cfg.stencil_prediction)
tree.tag = "u"
mesh.listing_of_leaves(tree)
print(tree.number_of_leaves)
print("")
divergence_matrix = create_matrix(tree, 0)
divergence_matrix.view()
print("")
for index in tree.tree_leaves:
tree.nvalue[index] = cfg.function(tree.ncoord_x[index], tree.ncoord_y[index])
output_module.write(tree, "finest_grid.dat")
op.run_projection(tree)
op.encode_details(tree)
op.run_thresholding(tree)
op.run_grading(tree)
op.run_pruning(tree)
mesh.listing_of_leaves(tree)
print(tree.number_of_leaves)
print("")
output_module.write(tree, "test_adapted_grid.dat")
divergence_matrix = create_matrix(tree, 0)
divergence_matrix.view()
print("")
divergence_bc = create_bc_scalar(tree, 0)
divergence_bc.view()
print("")
| 44.291161 | 129 | 0.520543 | 3,608 | 25,556 | 3.581208 | 0.061253 | 0.024147 | 0.058355 | 0.085288 | 0.828187 | 0.824549 | 0.789954 | 0.785233 | 0.777571 | 0.767665 | 0 | 0.02303 | 0.34755 | 25,556 | 576 | 130 | 44.368056 | 0.751889 | 0.119346 | 0 | 0.622222 | 0 | 0 | 0.025254 | 0.000988 | 0 | 0 | 0 | 0 | 0 | 1 | 0.005556 | false | 0.033333 | 0.030556 | 0 | 0.052778 | 0.022222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
f7e1853a36d9478d521fc02cd268a1f96ec42609 | 38 | py | Python | src/__init__.py | csonido/VCFfrom23AndMe | a25d2c587e5f86d98f99cba63da9fb60fe839f92 | [
"MIT"
] | null | null | null | src/__init__.py | csonido/VCFfrom23AndMe | a25d2c587e5f86d98f99cba63da9fb60fe839f92 | [
"MIT"
] | null | null | null | src/__init__.py | csonido/VCFfrom23AndMe | a25d2c587e5f86d98f99cba63da9fb60fe839f92 | [
"MIT"
] | null | null | null | from ._23andMeToVCF import from_file
| 19 | 37 | 0.842105 | 5 | 38 | 6 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.060606 | 0.131579 | 38 | 1 | 38 | 38 | 0.848485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
f7e38b416802bd1d183545e9994bc388ff2ff231 | 46 | py | Python | test_demo.py | leileigong/travis-py-demo | 5b67a7c3922a0c7ce3d2409b1ce410eb5eea9af6 | [
"MIT"
] | null | null | null | test_demo.py | leileigong/travis-py-demo | 5b67a7c3922a0c7ce3d2409b1ce410eb5eea9af6 | [
"MIT"
] | null | null | null | test_demo.py | leileigong/travis-py-demo | 5b67a7c3922a0c7ce3d2409b1ce410eb5eea9af6 | [
"MIT"
] | null | null | null | def test_coll_intersection():
assert True
| 15.333333 | 29 | 0.76087 | 6 | 46 | 5.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 46 | 2 | 30 | 23 | 0.868421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0.5 | true | 0 | 0 | 0 | 0.5 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
f7f59b62bb3925d5a26dd4052ddb689617179e89 | 104 | py | Python | tests/exog/random/random_exog_300_1280.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/exog/random/random_exog_300_1280.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/exog/random/random_exog_300_1280.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import tests.exog.test_random_exogenous as testrandexog
testrandexog.test_random_exogenous( 300,1280); | 26 | 55 | 0.865385 | 14 | 104 | 6.142857 | 0.714286 | 0.232558 | 0.44186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.072165 | 0.067308 | 104 | 4 | 56 | 26 | 0.814433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
7906d1ae2c90175bcee045c784344e55fb521f0b | 14,451 | py | Python | tests/test_workflows.py | apikay/celery-director | 3575e9f89690f6f2518c9939be6169fb4383cbed | [
"BSD-3-Clause"
] | 351 | 2020-01-30T14:37:48.000Z | 2022-03-29T11:34:14.000Z | tests/test_workflows.py | apikay/celery-director | 3575e9f89690f6f2518c9939be6169fb4383cbed | [
"BSD-3-Clause"
] | 53 | 2020-02-14T17:06:48.000Z | 2022-03-22T14:37:36.000Z | tests/test_workflows.py | apikay/celery-director | 3575e9f89690f6f2518c9939be6169fb4383cbed | [
"BSD-3-Clause"
] | 33 | 2020-01-31T14:27:21.000Z | 2022-03-10T19:50:06.000Z | import time
import pytest
from celery.result import GroupResult
from celery.schedules import crontab
from kombu.exceptions import EncodeError
from director import build_celery_schedule
from director.exceptions import WorkflowSyntaxError
from director.models.tasks import Task
from director.models.workflows import Workflow
KEYS = ["id", "created", "updated", "task"]
def test_execute_one_task_success(app, create_builder):
workflow, builder = create_builder("example", "WORKFLOW", {})
assert workflow["status"] == "pending"
# Canvas has been built
assert len(builder.canvas) == 3
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert builder.canvas[1].task == "TASK_EXAMPLE"
# Tasks added in DB
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 1
assert tasks[0].key == "TASK_EXAMPLE"
assert tasks[0].status.value == "pending"
# Tasks executed in Celery
result = builder.run()
assert result.get() is None
assert result.parent.parent.get() is None
assert result.parent.get() == "task_example"
assert result.parent.state == "SUCCESS"
# DB rows status updated
time.sleep(0.5)
with app.app_context():
task = Task.query.filter_by(id=tasks[0].id).first()
workflow = Workflow.query.filter_by(id=task.workflow_id).first()
assert workflow.status.value == "success"
assert task.status.value == "success"
def test_execute_one_task_error(app, create_builder):
workflow, builder = create_builder("example", "ERROR", {})
assert workflow["status"] == "pending"
# Canvas has been built
assert len(builder.canvas) == 3
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert builder.canvas[1].task == "TASK_ERROR"
# Tasks added in DB
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 1
assert tasks[0].key == "TASK_ERROR"
assert tasks[0].status.value == "pending"
# Tasks executed in Celery
result = builder.run()
with pytest.raises(ZeroDivisionError):
assert result.get()
# DB rows status updated
time.sleep(0.5)
with app.app_context():
task = Task.query.filter_by(id=tasks[0].id).first()
workflow = Workflow.query.filter_by(id=task.workflow_id).first()
assert workflow.status.value == "error"
assert task.status.value == "error"
def test_execute_chain_success(app, create_builder):
workflow, builder = create_builder("example", "SIMPLE_CHAIN", {})
assert workflow["status"] == "pending"
# Canvas has been built
assert len(builder.canvas) == 5
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert [c.task for c in builder.canvas[1:-1]] == ["TASK_A", "TASK_B", "TASK_C"]
# Tasks added in DB
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 3
assert [n.key for n in tasks] == ["TASK_A", "TASK_B", "TASK_C"]
assert set([n.status.value for n in tasks]) == {
"pending",
}
# Tasks executed in Celery
result = builder.run()
assert result.get() is None
assert result.parent.parent.parent.parent.get() is None
assert result.parent.get() == "task_c"
assert result.parent.state == "SUCCESS"
assert result.parent.parent.get() == "task_b"
assert result.parent.parent.state == "SUCCESS"
assert result.parent.parent.parent.get() == "task_a"
assert result.parent.parent.parent.state == "SUCCESS"
# DB rows status updated
time.sleep(0.5)
with app.app_context():
tasks = Task.query.filter_by(id=tasks[0].id).all()
workflow = Workflow.query.filter_by(id=tasks[0].workflow_id).first()
assert workflow.status.value == "success"
for task in tasks:
assert task.status.value == "success"
def test_execute_chain_error(app, create_builder):
workflow, builder = create_builder("example", "SIMPLE_CHAIN_ERROR", {})
assert workflow["status"] == "pending"
# Canvas has been built
assert len(builder.canvas) == 5
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert [c.task for c in builder.canvas[1:-1]] == ["TASK_A", "TASK_B", "TASK_ERROR"]
# Tasks added in DB
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 3
assert [n.key for n in tasks] == ["TASK_A", "TASK_B", "TASK_ERROR"]
assert set([n.status.value for n in tasks]) == {
"pending",
}
# Tasks executed in Celery
result = builder.run()
with pytest.raises(ZeroDivisionError):
assert result.get()
# DB rows status updated
time.sleep(0.5)
with app.app_context():
task_a = Task.query.filter_by(key="TASK_A").first()
task_b = Task.query.filter_by(key="TASK_B").first()
task_error = Task.query.filter_by(key="TASK_ERROR").first()
workflow = Workflow.query.filter_by(id=task_a.workflow_id).first()
assert task_a.status.value == "success"
assert task_b.status.value == "success"
assert task_error.status.value == "error"
assert workflow.status.value == "error"
def test_execute_group_success(app, create_builder):
workflow, builder = create_builder("example", "SIMPLE_GROUP", {})
assert workflow["status"] == "pending"
# Canvas has been built
assert len(builder.canvas) == 4
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert builder.canvas[1].task == "TASK_A"
group_tasks = builder.canvas[2].tasks
assert len(group_tasks) == 2
assert [group_tasks[0].task, group_tasks[1].task] == [
"TASK_B",
"TASK_C",
]
# Tasks added in DB
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 3
assert [n.key for n in tasks] == ["TASK_A", "TASK_B", "TASK_C"]
assert set([n.status.value for n in tasks]) == {
"pending",
}
# Tasks executed in Celery
result = builder.run()
assert result.get() is None
assert result.parent.parent.get() == "task_a"
assert isinstance(result.parent, GroupResult)
assert result.parent.get() == ["task_b", "task_c"]
# DB rows status updated
time.sleep(0.5)
with app.app_context():
tasks = Task.query.filter_by(id=tasks[0].id).all()
workflow = Workflow.query.filter_by(id=tasks[0].workflow_id).first()
assert workflow.status.value == "success"
for task in tasks:
assert task.status.value == "success"
def test_execute_group_error(app, create_builder):
workflow, builder = create_builder("example", "SIMPLE_GROUP_ERROR", {})
assert workflow["status"] == "pending"
# Canvas has been built
assert len(builder.canvas) == 4
assert builder.canvas[0].task == "director.tasks.workflows.start"
assert builder.canvas[-1].task == "director.tasks.workflows.end"
assert builder.canvas[1].task == "TASK_A"
group_tasks = builder.canvas[2].tasks
assert len(group_tasks) == 2
assert [group_tasks[0].task, group_tasks[1].task] == ["TASK_ERROR", "TASK_C"]
# Tasks added in DB
with app.app_context():
tasks = Task.query.order_by(Task.created_at.asc()).all()
assert len(tasks) == 3
assert [n.key for n in tasks] == ["TASK_A", "TASK_ERROR", "TASK_C"]
assert set([n.status.value for n in tasks]) == {
"pending",
}
# Tasks executed in Celery
result = builder.run()
with pytest.raises(ZeroDivisionError):
assert result.get()
# DB rows status updated
time.sleep(0.5)
with app.app_context():
task_a = Task.query.filter_by(key="TASK_A").first()
task_error = Task.query.filter_by(key="TASK_ERROR").first()
task_c = Task.query.filter_by(key="TASK_C").first()
workflow = Workflow.query.filter_by(id=task_a.workflow_id).first()
assert task_a.status.value == "success"
assert task_error.status.value == "error"
assert task_c.status.value == "success"
assert workflow.status.value == "error"
@pytest.mark.skip_no_worker()
def test_execute_celery_error_one_task(app, create_builder):
workflow, builder = create_builder("example", "CELERY_ERROR_ONE_TASK", {})
assert workflow["status"] == "pending"
# Tasks executed in Celery
result = builder.run()
with pytest.raises(EncodeError):
assert result.get()
# DB rows status updated
time.sleep(0.5)
with app.app_context():
task = Task.query.order_by(Task.created_at.asc()).first()
workflow = Workflow.query.filter_by(id=task.workflow_id).first()
assert workflow.status.value == "error"
assert task.status.value == "error"
@pytest.mark.skip_no_worker()
def test_execute_celery_error_multiple_tasks(app, create_builder):
workflow, builder = create_builder("example", "CELERY_ERROR_MULTIPLE_TASKS", {})
assert workflow["status"] == "pending"
# Tasks executed in Celery
result = builder.run()
with pytest.raises(EncodeError):
assert result.get()
# DB rows status updated
time.sleep(0.5)
with app.app_context():
task_a = Task.query.filter_by(key="TASK_A").first()
task_celery_error = Task.query.filter_by(key="TASK_CELERY_ERROR").first()
workflow = Workflow.query.filter_by(id=task_a.workflow_id).first()
assert task_a.status.value == "success"
assert task_celery_error.status.value == "error"
assert workflow.status.value == "error"
def test_return_values(app, create_builder):
workflow, builder = create_builder("example", "RETURN_VALUES", {})
result = builder.run()
time.sleep(0.5)
with app.app_context():
tasks = {t.key: t.result for t in Task.query.all()}
assert tasks["STR"] == "return_value"
assert tasks["INT"] == 1234
assert tasks["LIST"] == ["jack", "sape", "guido"]
assert tasks["NONE"] is None
assert tasks["DICT"] == {"foo": "bar"}
assert tasks["NESTED"] == {
"jack": 4098,
"sape": 4139,
"guido": 4127,
"nested": {"foo": "bar"},
"none": None,
"list": ["jack", "sape", "guido"],
}
def test_return_exception(app, create_builder):
workflow, builder = create_builder("example", "RETURN_EXCEPTION", {})
result = builder.run()
time.sleep(0.5)
with app.app_context():
tasks = {t.key: t.result for t in Task.query.all()}
assert tasks["STR"] == "return_value"
assert list(tasks["TASK_ERROR"].keys()) == ["exception", "traceback"]
assert tasks["TASK_ERROR"]["exception"] == "division by zero"
assert tasks["TASK_ERROR"]["traceback"].startswith(
"Traceback (most recent call last)"
)
assert "ZeroDivisionError: division by zero" in tasks["TASK_ERROR"]["traceback"]
def test_build_celery_schedule_float_with_payload():
float_schedule = {"payload": {}, "schedule": 30.0}
assert ("30.0", 30.0) == build_celery_schedule("workflow_schedule_float", float_schedule)
def test_build_celery_schedule_float():
float_schedule = {"schedule": 30.0}
assert ("30.0", 30.0) == build_celery_schedule("workflow_schedule_float", float_schedule)
@pytest.mark.parametrize(
"test_input, expected",
[
("1 * * * *", crontab(minute="1", hour="*", day_of_week="*", day_of_month="*", month_of_year="*")),
("* 1 * * *", crontab(minute="*", hour="1", day_of_week="*", day_of_month="*", month_of_year="*")),
("* * 1 * *", crontab(minute="*", hour="*", day_of_week="1", day_of_month="*", month_of_year="*")),
("* * * 1 *", crontab(minute="*", hour="*", day_of_week="*", day_of_month="1", month_of_year="*")),
("* * * * 1", crontab(minute="*", hour="*", day_of_week="*", day_of_month="*", month_of_year="1")),
(
"*/10 */11 */12 */13 */14",
crontab(minute="*/10", hour="*/11", day_of_week="*/12", day_of_month="*/13", month_of_year="*/14")
)
]
)
def test_build_celery_schedule_crontab(test_input, expected):
cron_schedule = {"schedule": test_input}
assert (test_input, expected) == build_celery_schedule("workflow_crontab", cron_schedule)
def test_build_celery_interval():
float_schedule = {"interval": 30.0}
assert ("30.0", 30.0) == build_celery_schedule("workflow_schedule_float", float_schedule)
@pytest.mark.parametrize(
"test_input, expected",
[
("1 * * * *", crontab(minute="1", hour="*", day_of_month="*", month_of_year="*", day_of_week="*")),
("* 1 * * *", crontab(minute="*", hour="1", day_of_month="*", month_of_year="*", day_of_week="*")),
("* * 1 * *", crontab(minute="*", hour="*", day_of_month="1", month_of_year="*", day_of_week="*")),
("* * * 1 *", crontab(minute="*", hour="*", day_of_month="*", month_of_year="1", day_of_week="*")),
("* * * * 1", crontab(minute="*", hour="*", day_of_month="*", month_of_year="*", day_of_week="1")),
(
"*/10 */11 */12 */13 */14",
crontab(minute="*/10", hour="*/11", day_of_month="*/12", month_of_year="*/13", day_of_week="*/14")
)
]
)
def test_build_celery_crontab(test_input, expected):
cron_schedule = {"crontab": test_input}
assert (test_input, expected) == build_celery_schedule("workflow_crontab", cron_schedule)
def test_build_celery_invalid_crontab():
    # a valid crontab expression has five fields; this one only has four
periodic_conf = {"crontab": "* * * *"}
with pytest.raises(WorkflowSyntaxError):
build_celery_schedule("workflow_invalid_crontab", periodic_conf)
def test_build_celery_invalid_schedule():
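    # day_of_week is the fifth field and must be within 0-6, so "12" is invalid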
cron_schedule = {"crontab": "* * * * 12"}
with pytest.raises(WorkflowSyntaxError):
build_celery_schedule("workflow_invalid_crontab", cron_schedule)
def test_build_celery_invalid_periodic_key():
cron_schedule = {"non_valid_key": "* * * * *"}
with pytest.raises(WorkflowSyntaxError):
build_celery_schedule("workflow_invalid_key", cron_schedule)
| 36.959079 | 110 | 0.652135 | 1,914 | 14,451 | 4.729363 | 0.077325 | 0.03281 | 0.028723 | 0.030049 | 0.863897 | 0.84335 | 0.816173 | 0.803027 | 0.790212 | 0.757402 | 0 | 0.014925 | 0.188637 | 14,451 | 390 | 111 | 37.053846 | 0.7571 | 0.046018 | 0 | 0.569444 | 0 | 0 | 0.148372 | 0.037293 | 0 | 0 | 0 | 0 | 0.388889 | 1 | 0.0625 | false | 0 | 0.03125 | 0 | 0.09375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
7920f12ca70f3c6bd1519cfad966774a16f7938d | 34 | py | Python | hcap_utils/models/__init__.py | fabiommendes/capacidade_hospitalar | 4f675b574573eb3f51e6be8a927ea230bf2712c7 | [
"MIT"
] | null | null | null | hcap_utils/models/__init__.py | fabiommendes/capacidade_hospitalar | 4f675b574573eb3f51e6be8a927ea230bf2712c7 | [
"MIT"
] | 31 | 2020-04-11T13:38:17.000Z | 2021-09-22T18:51:11.000Z | hcap_utils/models/__init__.py | fabiommendes/capacidade_hospitalar | 4f675b574573eb3f51e6be8a927ea230bf2712c7 | [
"MIT"
] | 1 | 2020-04-08T17:04:39.000Z | 2020-04-08T17:04:39.000Z | from .seed_state import SeedState
| 17 | 33 | 0.852941 | 5 | 34 | 5.6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 34 | 1 | 34 | 34 | 0.933333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
7921827290e630b4e2ef42e13f3eeadbd70a01a1 | 15,652 | py | Python | src/tests/orga/views/test_orga_views_mail.py | hrchu/pretalx | cd7e5525f80c7290d9650065b4cf4f085032adfc | [
"Apache-2.0"
] | 3 | 2020-03-28T06:21:27.000Z | 2020-03-28T12:59:21.000Z | src/tests/orga/views/test_orga_views_mail.py | hrchu/pretalx | cd7e5525f80c7290d9650065b4cf4f085032adfc | [
"Apache-2.0"
] | 14 | 2020-03-27T22:46:38.000Z | 2020-03-29T18:40:02.000Z | src/tests/orga/views/test_orga_views_mail.py | hrchu/pretalx | cd7e5525f80c7290d9650065b4cf4f085032adfc | [
"Apache-2.0"
] | 4 | 2020-03-21T10:33:20.000Z | 2020-03-28T10:14:19.000Z | import pytest
from django.core import mail as djmail
from django_scopes import scope
from pretalx.mail.models import MailTemplate, QueuedMail
@pytest.mark.django_db
def test_orga_can_view_pending_mails(orga_client, event, mail, other_mail):
response = orga_client.get(event.orga_urls.outbox)
assert response.status_code == 200
assert mail.subject in response.content.decode()
@pytest.mark.django_db
def test_orga_can_view_sent_mails(orga_client, event, sent_mail):
response = orga_client.get(event.orga_urls.sent_mails)
assert response.status_code == 200
assert sent_mail.subject in response.content.decode()
@pytest.mark.django_db
def test_orga_can_view_pending_mail(orga_client, event, mail):
response = orga_client.get(mail.urls.base)
assert response.status_code == 200
assert mail.subject in response.content.decode()
@pytest.mark.django_db
def test_orga_can_edit_pending_mail(orga_client, event, mail):
djmail.outbox = []
response = orga_client.post(
mail.urls.base,
follow=True,
data={
"to": "testWIN@gmail.com",
"bcc": mail.bcc or "",
"cc": mail.cc or "",
"reply_to": mail.reply_to or "",
"subject": mail.subject,
"text": mail.text or "",
},
)
assert response.status_code == 200
assert mail.subject in response.content.decode()
mail.refresh_from_db()
assert mail.to == "testwin@gmail.com"
assert len(djmail.outbox) == 0
@pytest.mark.django_db
def test_orga_can_edit_and_send_pending_mail(orga_client, event, mail):
djmail.outbox = []
response = orga_client.post(
mail.urls.base,
follow=True,
data={
"to": "testWIN@gmail.com",
"bcc": "foo@bar.com,bar@bar.com",
"cc": "",
"reply_to": mail.reply_to,
"subject": mail.subject,
"text": "This is the best test.",
"form": "send",
},
)
assert response.status_code == 200
assert (
mail.subject not in response.content.decode()
) # Is now in the sent mail view, not in the outbox
mail.refresh_from_db()
assert mail.to == "testwin@gmail.com"
assert mail.cc != "None"
assert len(djmail.outbox) == 1
real_mail = djmail.outbox[0]
assert real_mail.body == "This is the best test."
assert real_mail.to == ["testwin@gmail.com"]
assert real_mail.cc == [""]
assert real_mail.bcc == ["foo@bar.com", "bar@bar.com"]
@pytest.mark.django_db
def test_orga_can_view_sent_mail(orga_client, event, sent_mail):
response = orga_client.get(sent_mail.urls.base)
assert response.status_code == 200
assert sent_mail.subject in response.content.decode()
@pytest.mark.django_db
def test_orga_cannot_edit_sent_mail(orga_client, event, sent_mail):
response = orga_client.post(
sent_mail.urls.base,
follow=True,
data={
"to": "testfailure@gmail.com",
"bcc": sent_mail.bcc or "",
"cc": sent_mail.cc or "",
"reply_to": sent_mail.reply_to or "",
"subject": "WILD NEW SUBJECT APPEARS",
"text": sent_mail.text or "",
},
)
assert response.status_code == 200
assert sent_mail.subject in response.content.decode()
sent_mail.refresh_from_db()
assert sent_mail.to != "testfailure@gmail.com"
@pytest.mark.django_db
def test_orga_can_send_all_mails(orga_client, event, mail, other_mail, sent_mail):
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 2
response = orga_client.get(event.orga_urls.send_outbox, follow=True)
assert response.status_code == 200
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 2
response = orga_client.post(event.orga_urls.send_outbox, follow=True)
assert response.status_code == 200
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 0
@pytest.mark.django_db
def test_orga_can_send_single_mail(orga_client, event, mail, other_mail):
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 2
response = orga_client.get(mail.urls.send, follow=True)
assert response.status_code == 200
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 1
@pytest.mark.django_db
def test_orga_can_discard_all_mails(orga_client, event, mail, other_mail, sent_mail):
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 2
assert QueuedMail.objects.count() == 3
response = orga_client.get(event.orga_urls.purge_outbox, follow=True)
assert response.status_code == 200
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 2
assert QueuedMail.objects.count() == 3
response = orga_client.post(event.orga_urls.purge_outbox, follow=True)
assert response.status_code == 200
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 0
assert QueuedMail.objects.count() == 1
@pytest.mark.django_db
def test_orga_can_discard_single_mail(orga_client, event, mail, other_mail):
with scope(event=event):
assert QueuedMail.objects.count() == 2
response = orga_client.get(mail.urls.delete, follow=True)
assert response.status_code == 200
with scope(event=event):
assert QueuedMail.objects.count() == 1
@pytest.mark.django_db
def test_orga_cannot_send_sent_mail(orga_client, event, sent_mail):
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=False).count() == 1
response = orga_client.get(sent_mail.urls.send, follow=True)
before = sent_mail.sent
sent_mail.refresh_from_db()
assert sent_mail.sent == before
assert response.status_code == 200
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=False).count() == 1
@pytest.mark.django_db
def test_orga_cannot_discard_sent_mail(orga_client, event, sent_mail):
with scope(event=event):
assert QueuedMail.objects.count() == 1
response = orga_client.get(sent_mail.urls.delete, follow=True)
assert response.status_code == 200
with scope(event=event):
assert QueuedMail.objects.count() == 1
@pytest.mark.django_db
def test_orga_can_copy_sent_mail(orga_client, event, sent_mail):
with scope(event=event):
assert QueuedMail.objects.count() == 1
response = orga_client.get(sent_mail.urls.copy, follow=True)
assert response.status_code == 200
with scope(event=event):
assert QueuedMail.objects.count() == 2
@pytest.mark.django_db
def test_orga_can_view_templates(orga_client, event, mail_template):
response = orga_client.get(event.orga_urls.mail_templates, follow=True)
assert response.status_code == 200
@pytest.mark.django_db
def test_orga_can_create_template(orga_client, event, mail_template):
with scope(event=event):
assert MailTemplate.objects.count() == 6
response = orga_client.post(
event.orga_urls.new_template,
follow=True,
data={"subject_0": "[test] subject", "text_0": "text"},
)
assert response.status_code == 200
with scope(event=event):
assert MailTemplate.objects.count() == 7
assert MailTemplate.objects.get(event=event, subject__contains="[test] subject")
@pytest.mark.django_db
@pytest.mark.parametrize("variant", ("custom", "fixed"))
def test_orga_can_edit_template(orga_client, event, mail_template, variant):
if variant == "fixed":
mail_template = event.ack_template
with scope(event=event):
assert MailTemplate.objects.count() == 6
response = orga_client.get(mail_template.urls.edit, follow=True)
assert response.status_code == 200
response = orga_client.post(
mail_template.urls.edit,
follow=True,
data={
"subject_0": "COMPLETELY NEW AND UNHEARD OF",
"text_0": mail_template.text,
},
)
assert response.status_code == 200
with scope(event=event):
assert MailTemplate.objects.count() == 6
assert MailTemplate.objects.get(
event=event, subject__contains="COMPLETELY NEW AND UNHEARD OF"
)
@pytest.mark.django_db
def test_orga_cannot_add_wrong_placeholder_in_template(orga_client, event):
with scope(event=event):
assert MailTemplate.objects.count() == 5
mail_template = event.ack_template
response = orga_client.post(
mail_template.urls.edit,
follow=True,
data={
"subject_0": "COMPLETELY NEW AND UNHEARD OF",
"text_0": str(mail_template.text) + "{wrong_placeholder}",
},
)
assert response.status_code == 200
with scope(event=event):
mail_template.refresh_from_db()
assert "COMPLETELY" not in str(mail_template.subject)
assert "{wrong_placeholder}" not in str(mail_template.text)
@pytest.mark.django_db
def test_orga_can_delete_template(orga_client, event, mail_template):
with scope(event=event):
assert MailTemplate.objects.count() == 6
response = orga_client.post(mail_template.urls.delete, follow=True)
assert response.status_code == 200
with scope(event=event):
assert MailTemplate.objects.count() == 5
@pytest.mark.django_db
def test_orga_can_compose_single_mail(orga_client, event, submission):
    response = orga_client.get(event.orga_urls.compose_mails, follow=True)
assert response.status_code == 200
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 0
response = orga_client.post(
event.orga_urls.compose_mails,
follow=True,
data={
"recipients": "submitted",
"bcc": "",
"cc": "",
"reply_to": "",
"subject": "foo",
"text": "bar",
},
)
assert response.status_code == 200
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 1
@pytest.mark.django_db
def test_orga_can_compose_mail_for_track(orga_client, event, submission, track):
with scope(event=event):
submission.track = track
submission.save()
    response = orga_client.get(event.orga_urls.compose_mails, follow=True)
assert response.status_code == 200
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 0
response = orga_client.post(
event.orga_urls.compose_mails,
follow=True,
data={
"bcc": "",
"cc": "",
"reply_to": "",
"subject": "foo",
"text": "bar",
"tracks": [track.pk],
},
)
assert response.status_code == 200
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 1
@pytest.mark.django_db
def test_orga_can_compose_mail_for_submission_type(orga_client, event, submission):
    response = orga_client.get(event.orga_urls.compose_mails, follow=True)
assert response.status_code == 200
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 0
response = orga_client.post(
event.orga_urls.compose_mails,
follow=True,
data={
"bcc": "",
"cc": "",
"reply_to": "",
"subject": "foo",
"text": "bar",
"submission_types": [submission.submission_type.pk],
},
)
assert response.status_code == 200
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 1
@pytest.mark.django_db
def test_orga_can_compose_mail_for_track_and_type_no_doubles(
orga_client, event, submission, track
):
with scope(event=event):
submission.track = track
submission.save()
response = orga_client.get(event.orga_urls.compose_mails, follow=True,)
assert response.status_code == 200
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 0
response = orga_client.post(
event.orga_urls.compose_mails,
follow=True,
data={
"bcc": "",
"cc": "",
"reply_to": "",
"subject": "foo",
"text": "bar",
"tracks": [track.pk],
"submission_types": [submission.submission_type.pk],
},
)
assert response.status_code == 200
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 1
@pytest.mark.django_db
def test_orga_can_compose_single_mail_selected_submissions(
orga_client, event, submission, other_submission
):
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 0
response = orga_client.post(
event.orga_urls.compose_mails,
follow=True,
data={
"submissions": [other_submission.code],
"bcc": "",
"cc": "",
"reply_to": "",
"subject": "foo",
"text": "bar",
},
)
assert response.status_code == 200
with scope(event=event):
mails = list(QueuedMail.objects.filter(sent__isnull=True))
assert len(mails) == 1
assert not mails[0].to
assert list(mails[0].to_users.all()) == [other_submission.speakers.first()]
@pytest.mark.django_db
def test_orga_can_compose_single_mail_reviewers(
orga_client, event, orga_user, review_user
):
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 0
response = orga_client.post(
event.orga_urls.compose_mails,
follow=True,
data={
"recipients": "reviewers",
"bcc": "",
"cc": "",
"reply_to": "",
"subject": "foo",
"text": "bar",
},
)
assert response.status_code == 200
with scope(event=event):
mails = list(QueuedMail.objects.filter(sent__isnull=True))
assert len(mails) == 1
assert not mails[0].to
assert list(mails[0].to_users.all()) == [review_user]
@pytest.mark.django_db
def test_orga_can_compose_mail_to_speakers_with_no_slides(
orga_client, event, orga_user, slot, confirmed_submission
):
with scope(event=event):
assert QueuedMail.objects.filter(sent__isnull=True).count() == 1
response = orga_client.post(
event.orga_urls.compose_mails,
follow=True,
data={
"recipients": "no_slides",
"bcc": "",
"cc": "",
"reply_to": "",
"subject": "foo",
"text": "bar",
},
)
assert response.status_code == 200
with scope(event=event):
mails = list(QueuedMail.objects.filter(sent__isnull=True))
assert len(mails) == 2
assert not mails[-1].to
assert list(mails[-1].to_users.all()) == [confirmed_submission.speakers.first()]
@pytest.mark.django_db
def test_orga_can_compose_single_mail_from_template(orga_client, event, submission):
response = orga_client.get(
event.orga_urls.compose_mails
+ f"?template={event.ack_template.pk}&submission={submission.code}",
follow=True,
)
assert response.status_code == 200
with scope(event=event):
assert str(event.ack_template.subject) in response.content.decode()
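# --- Editor's note: illustrative sketch, not part of the original test file ---
# These tests assume pytest fixtures (`orga_client`, `event`, `mail_template`,
# `submission`, ...) defined elsewhere in the suite, plus django-scopes for the
# `scope(event=event)` blocks. A minimal, hypothetical shape for the
# `mail_template` fixture, purely to show the wiring the tests rely on:
#
# @pytest.fixture
# def mail_template(event):
#     with scope(event=event):
#         return MailTemplate.objects.create(
#             event=event, subject="template subject", text="template text"
#         )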
| 34.174672 | 88 | 0.655188 | 1,981 | 15,652 | 4.940434 | 0.069662 | 0.062328 | 0.058649 | 0.079595 | 0.878104 | 0.837335 | 0.824461 | 0.799837 | 0.758455 | 0.719219 | 0 | 0.012918 | 0.223486 | 15,652 | 457 | 89 | 34.249453 | 0.792332 | 0.003003 | 0 | 0.669154 | 0 | 0 | 0.060822 | 0.008139 | 0 | 0 | 0 | 0 | 0.256219 | 1 | 0.067164 | false | 0 | 0.00995 | 0 | 0.077114 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
793429e4e031135aa539ac7e06e5ce355b4567d6 | 3,403 | py | Python | networkapi/api_ip/permissions.py | vinicius-marinho/GloboNetworkAPI | 94651d3b4dd180769bc40ec966814f3427ccfb5b | [
"Apache-2.0"
] | 73 | 2015-04-13T17:56:11.000Z | 2022-03-24T06:13:07.000Z | networkapi/api_ip/permissions.py | leopoldomauricio/GloboNetworkAPI | 3b5b2e336d9eb53b2c113977bfe466b23a50aa29 | [
"Apache-2.0"
] | 99 | 2015-04-03T01:04:46.000Z | 2021-10-03T23:24:48.000Z | networkapi/api_ip/permissions.py | shildenbrand/GloboNetworkAPI | 515d5e961456cee657c08c275faa1b69b7452719 | [
"Apache-2.0"
] | 64 | 2015-08-05T21:26:29.000Z | 2022-03-22T01:06:28.000Z | # -*- coding: utf-8 -*-
from rest_framework.permissions import BasePermission
from networkapi.admin_permission import AdminPermission
from networkapi.api_ip.facade import get_ipv4_by_ids
from networkapi.api_ip.facade import get_ipv6_by_ids
from networkapi.api_network.facade.v3 import get_networkipv4_by_ids
from networkapi.api_network.facade.v3 import get_networkipv6_by_ids
from networkapi.auth import has_perm
from networkapi.auth import validate_object_perm
class Read(BasePermission):

    def has_permission(self, request, view):
        return has_perm(
            request.user,
            AdminPermission.IPS,
            AdminPermission.READ_OPERATION
        )


class Write(BasePermission):

    def has_permission(self, request, view):
        return has_perm(
            request.user,
            AdminPermission.IPS,
            AdminPermission.WRITE_OPERATION
        )


def perm_objv4(request, operation, object_type, *args, **kwargs):
    if request.method == 'POST':
        objs = [net['networkipv4'] for net in request.DATA['ips']]
        objs = get_networkipv4_by_ids(objs)\
            .values_list('vlan', flat=True)
    else:
        objs = get_ipv4_by_ids(kwargs.get('obj_ids', []).split(';'))\
            .values_list('networkipv4__vlan', flat=True)

    return validate_object_perm(
        objs,
        request.user,
        operation,
        object_type
    )


def perm_objv6(request, operation, object_type, *args, **kwargs):
    if request.method == 'POST':
        objs = [net['networkipv6'] for net in request.DATA['ips']]
        objs = get_networkipv6_by_ids(objs)\
            .values_list('vlan', flat=True)
    else:
        objs = get_ipv6_by_ids(kwargs.get('obj_ids', []).split(';'))\
            .values_list('networkipv6__vlan', flat=True)

    return validate_object_perm(
        objs,
        request.user,
        operation,
        object_type
    )


def write_objv4_permission(request, *args, **kwargs):

    class Perm(BasePermission):

        def has_permission(self, request, view):
            return perm_objv4(
                request,
                AdminPermission.OBJ_WRITE_OPERATION,
                AdminPermission.OBJ_TYPE_VLAN,
                *args,
                **kwargs
            )

    return Perm


def read_objv4_permission(request, *args, **kwargs):

    class Perm(BasePermission):

        def has_permission(self, request, view):
            return perm_objv4(
                request,
                AdminPermission.OBJ_READ_OPERATION,
                AdminPermission.OBJ_TYPE_VLAN,
                *args,
                **kwargs
            )

    return Perm


def write_objv6_permission(request, *args, **kwargs):

    class Perm(BasePermission):

        def has_permission(self, request, view):
            return perm_objv6(
                request,
                AdminPermission.OBJ_WRITE_OPERATION,
                AdminPermission.OBJ_TYPE_VLAN,
                *args,
                **kwargs
            )

    return Perm


def read_objv6_permission(request, *args, **kwargs):

    class Perm(BasePermission):

        def has_permission(self, request, view):
            return perm_objv6(
                request,
                AdminPermission.OBJ_READ_OPERATION,
                AdminPermission.OBJ_TYPE_VLAN,
                *args,
                **kwargs
            )

    return Perm
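# --- Editor's note: hypothetical usage sketch, not part of the original file ---
# The four factories above return permission *classes* closed over the current
# request's args/kwargs, so a DRF view can attach object-level checks per call.
# Assuming a standard rest_framework APIView (names illustrative only):
#
# class IPv4View(APIView):
#     def get_permissions(self):
#         perms = [Read()]
#         if self.request.method != 'GET':
#             perms.append(
#                 write_objv4_permission(self.request, *self.args, **self.kwargs)()
#             )
#         return perms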
| 25.977099 | 69 | 0.607699 | 356 | 3,403 | 5.553371 | 0.174157 | 0.050582 | 0.060698 | 0.091047 | 0.814365 | 0.811836 | 0.811836 | 0.777441 | 0.748103 | 0.712696 | 0 | 0.010544 | 0.303262 | 3,403 | 130 | 70 | 26.176923 | 0.823281 | 0.006171 | 0 | 0.638298 | 0 | 0 | 0.027811 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12766 | false | 0 | 0.085106 | 0.06383 | 0.404255 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
f718d4a046685a88df8c7e67861745f498bb0714 | 96 | py | Python | venv/lib/python3.8/site-packages/pkginfo/tests/__init__.py | GiulianaPola/select_repeats | 17a0d053d4f874e42cf654dd142168c2ec8fbd11 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/pkginfo/tests/__init__.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/pkginfo/tests/__init__.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/2c/59/97/f8e5f25cbfc169c1e81504fc2144624a0b7d4d17526ee7745023ffd740 | 96 | 96 | 0.895833 | 9 | 96 | 9.555556 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.447917 | 0 | 96 | 1 | 96 | 96 | 0.447917 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
f78860f5a77861b4004faabf6c76b527b73a59a2 | 65 | py | Python | paper_code/distributed_evolution/populations/__init__.py | adam-katona/QualityEvolvabilityES | ebb96e1dbc2422109714c0f5c8174073f9cc6c6f | [
"MIT"
] | 1 | 2021-10-06T15:08:42.000Z | 2021-10-06T15:08:42.000Z | paper_code/distributed_evolution/populations/__init__.py | adam-katona/QualityEvolvabilityES | ebb96e1dbc2422109714c0f5c8174073f9cc6c6f | [
"MIT"
] | null | null | null | paper_code/distributed_evolution/populations/__init__.py | adam-katona/QualityEvolvabilityES | ebb96e1dbc2422109714c0f5c8174073f9cc6c6f | [
"MIT"
] | null | null | null | from .mix_normal import MixtureNormal
from .normal import Normal
| 21.666667 | 37 | 0.846154 | 9 | 65 | 6 | 0.555556 | 0.444444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.123077 | 65 | 2 | 38 | 32.5 | 0.947368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
e3f5978044de62989d3c0468fdf418711ec6f0d2 | 8,207 | py | Python | test_autolens/integration/tests/interferometer/full_pipeline/hyper_no_lens_light_bg.py | harshitjindal/PyAutoLens | f1d3f08f12a61f6634e1b7a0ccf8f5cfe0252035 | [
"MIT"
] | 1 | 2020-04-06T20:07:56.000Z | 2020-04-06T20:07:56.000Z | test_autolens/integration/tests/interferometer/full_pipeline/hyper_no_lens_light_bg.py | harshitjindal/PyAutoLens | f1d3f08f12a61f6634e1b7a0ccf8f5cfe0252035 | [
"MIT"
] | null | null | null | test_autolens/integration/tests/interferometer/full_pipeline/hyper_no_lens_light_bg.py | harshitjindal/PyAutoLens | f1d3f08f12a61f6634e1b7a0ccf8f5cfe0252035 | [
"MIT"
] | null | null | null | import autofit as af
import autolens as al
from test_autolens.integration.tests.interferometer import runner
test_type = "full_pipeline"
test_name = "hyper_no_lens_light_bg"
data_type = "lens_sie__source_smooth"
data_resolution = "sma"
def make_pipeline(
    name,
    phase_folders,
    pipeline_pixelization=al.pix.VoronoiBrightnessImage,
    pipeline_regularization=al.reg.AdaptiveBrightness,
    optimizer_class=af.MultiNest,
):

    phase1 = al.PhaseInterferometer(
        phase_name="phase_1__lens_sie__source_sersic",
        phase_folders=phase_folders,
        galaxies=dict(
            lens=al.GalaxyModel(
                redshift=0.5, mass=al.mp.EllipticalIsothermal, shear=al.mp.ExternalShear
            ),
            source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
        ),
        real_space_shape_2d=real_space_shape_2d,
        real_space_pixel_scales=real_space_pixel_scales,
        optimizer_class=optimizer_class,
    )

    phase1.optimizer.const_efficiency_mode = True
    phase1.optimizer.n_live_points = 80
    phase1.optimizer.sampling_efficiency = 0.2

    phase1 = phase1.extend_with_multiple_hyper_phases(
        hyper_galaxy=True, include_background_sky=True, include_background_noise=True
    )

    class InversionPhase(al.PhaseInterferometer):
        def customize_priors(self, results):

            ## Lens Mass, SIE -> SIE, Shear -> Shear ###

            self.galaxies.lens = results.from_phase(
                "phase_1__lens_sie__source_sersic"
            ).model.galaxies.lens

            ## Set all hyper-galaxies if feature is turned on ##

            self.hyper_image_sky = results.last.hyper_combined.instance.hyper_image_sky
            self.hyper_background_noise = (
                results.last.hyper_combined.instance.hyper_background_noise
            )

    phase2 = InversionPhase(
        phase_name="phase_1_initialize_magnification_inversion",
        phase_folders=phase_folders,
        galaxies=dict(
            lens=al.GalaxyModel(
                redshift=0.5, mass=al.mp.EllipticalIsothermal, shear=al.mp.ExternalShear
            ),
            source=al.GalaxyModel(
                redshift=1.0,
                pixelization=al.pix.VoronoiMagnification,
                regularization=al.reg.Constant,
            ),
        ),
        real_space_shape_2d=real_space_shape_2d,
        real_space_pixel_scales=real_space_pixel_scales,
        optimizer_class=optimizer_class,
    )

    phase2.optimizer.const_efficiency_mode = True
    phase2.optimizer.n_live_points = 20
    phase2.optimizer.sampling_efficiency = 0.8

    phase2 = phase2.extend_with_multiple_hyper_phases(
        hyper_galaxy=True,
        include_background_sky=True,
        include_background_noise=True,
        inversion=False,
    )

    class InversionPhase(al.PhaseInterferometer):
        def customize_priors(self, results):

            ### Lens Mass, SIE -> SIE, Shear -> Shear ###

            self.galaxies.lens = results.from_phase(
                "phase_1__lens_sie__source_sersic"
            ).model.galaxies.lens

            ### Source Inversion, Inv -> Inv ###

            self.galaxies.source = results.from_phase(
                "phase_1_initialize_magnification_inversion"
            ).model.galaxies.source

            ## Set all hyper-galaxies if feature is turned on ##

            self.hyper_image_sky = results.last.hyper_combined.instance.hyper_image_sky
            self.hyper_background_noise = (
                results.last.hyper_combined.instance.hyper_background_noise
            )

    phase3 = InversionPhase(
        phase_name="phase_3__lens_sie__source_magnification_inversion",
        phase_folders=phase_folders,
        galaxies=dict(
            lens=al.GalaxyModel(
                redshift=0.5, mass=al.mp.EllipticalIsothermal, shear=al.mp.ExternalShear
            ),
            source=al.GalaxyModel(
                redshift=1.0,
                pixelization=al.pix.VoronoiMagnification,
                regularization=al.reg.Constant,
            ),
        ),
        real_space_shape_2d=real_space_shape_2d,
        real_space_pixel_scales=real_space_pixel_scales,
        optimizer_class=optimizer_class,
    )

    phase3.optimizer.const_efficiency_mode = True
    phase3.optimizer.n_live_points = 50
    phase3.optimizer.sampling_efficiency = 0.5

    phase3 = phase3.extend_with_multiple_hyper_phases(
        hyper_galaxy=True,
        include_background_sky=True,
        include_background_noise=True,
        inversion=False,
    )

    class InversionPhase(al.PhaseInterferometer):
        def customize_priors(self, results):

            ## Lens Mass, SIE -> SIE, Shear -> Shear ###

            self.galaxies.lens = results.from_phase(
                "phase_3__lens_sie__source_magnification_inversion"
            ).model.galaxies.lens

            ## Set all hyper-galaxies if feature is turned on ##

            self.hyper_image_sky = results.last.hyper_combined.instance.hyper_image_sky
            self.hyper_background_noise = (
                results.last.hyper_combined.instance.hyper_background_noise
            )

    phase4 = InversionPhase(
        phase_name="phase_4__initialize_inversion",
        phase_folders=phase_folders,
        galaxies=dict(
            lens=al.GalaxyModel(
                redshift=0.5, mass=al.mp.EllipticalIsothermal, shear=al.mp.ExternalShear
            ),
            source=al.GalaxyModel(
                redshift=1.0,
                pixelization=pipeline_pixelization,
                regularization=pipeline_regularization,
            ),
        ),
        real_space_shape_2d=real_space_shape_2d,
        real_space_pixel_scales=real_space_pixel_scales,
        optimizer_class=optimizer_class,
    )

    phase4.optimizer.const_efficiency_mode = True
    phase4.optimizer.n_live_points = 20
    phase4.optimizer.sampling_efficiency = 0.8

    phase4 = phase4.extend_with_multiple_hyper_phases(
        hyper_galaxy=True,
        include_background_sky=True,
        include_background_noise=True,
        inversion=True,
    )

    class InversionPhase(al.PhaseInterferometer):
        def customize_priors(self, results):

            ### Lens Mass, SIE -> SIE, Shear -> Shear ###

            self.galaxies.lens = results.from_phase(
                "phase_3__lens_sie__source_magnification_inversion"
            ).model.galaxies.lens

            ### Source Inversion, Inv -> Inv ###

            self.galaxies.source = results.from_phase(
                "phase_4__initialize_inversion"
            ).hyper_combined.model.galaxies.source

            ## Set all hyper-galaxies if feature is turned on ##

            self.galaxies.source.hyper_galaxy = (
                results.last.hyper_combined.instance.galaxies.source.hyper_galaxy
            )

            self.hyper_image_sky = results.last.hyper_combined.instance.hyper_image_sky
            self.hyper_background_noise = (
                results.last.hyper_combined.instance.hyper_background_noise
            )

    phase5 = InversionPhase(
        phase_name="phase_5__lens_sie__source_inversion",
        phase_folders=phase_folders,
        galaxies=dict(
            lens=al.GalaxyModel(
                redshift=0.5, mass=al.mp.EllipticalIsothermal, shear=al.mp.ExternalShear
            ),
            source=al.GalaxyModel(
                redshift=1.0,
                pixelization=pipeline_pixelization,
                regularization=pipeline_regularization,
            ),
        ),
        real_space_shape_2d=real_space_shape_2d,
        real_space_pixel_scales=real_space_pixel_scales,
        optimizer_class=optimizer_class,
    )

    phase5.optimizer.const_efficiency_mode = True
    phase5.optimizer.n_live_points = 50
    phase5.optimizer.sampling_efficiency = 0.5

    phase5 = phase5.extend_with_multiple_hyper_phases(
        hyper_galaxy=True,
        include_background_sky=True,
        include_background_noise=True,
        inversion=True,
    )

    return al.PipelineDataset(name, phase1, phase2, phase3, phase4, phase5)


if __name__ == "__main__":
    import sys

    runner.run(sys.modules[__name__])
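# --- Editor's note: illustrative comment, not part of the original file ---
# `real_space_shape_2d` and `real_space_pixel_scales` are referenced above but
# not defined in this snippet; in the full integration-test suite they are
# presumably module-level settings. The phases chain priors through
# `customize_priors`: `.model....` keeps a component as free parameters while
# `.instance....` fixes it at its best fit, e.g. (hypothetical phase name):
#
# class NextPhase(al.PhaseInterferometer):
#     def customize_priors(self, results):
#         self.galaxies.lens = results.from_phase("earlier_phase").model.galaxies.lens
#         self.hyper_image_sky = results.last.hyper_combined.instance.hyper_image_sky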
| 33.092742 | 88 | 0.652248 | 879 | 8,207 | 5.734926 | 0.137656 | 0.035707 | 0.041658 | 0.03174 | 0.816306 | 0.72803 | 0.723071 | 0.719302 | 0.719302 | 0.719302 | 0 | 0.016003 | 0.269039 | 8,207 | 247 | 89 | 33.226721 | 0.824304 | 0.04947 | 0 | 0.61828 | 0 | 0 | 0.063105 | 0.060008 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026882 | false | 0 | 0.021505 | 0 | 0.075269 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
5808d671682913da309e52da5f6913ab22c74888 | 45 | py | Python | hydro/__init__.py | capruitt/hydro | bb128b3c1381eff735bc8e89ef84273f3ee1f550 | [
"MIT"
] | 3 | 2016-12-21T16:31:51.000Z | 2017-01-22T12:50:26.000Z | hydro/__init__.py | capruitt/hydro | bb128b3c1381eff735bc8e89ef84273f3ee1f550 | [
"MIT"
] | null | null | null | hydro/__init__.py | capruitt/hydro | bb128b3c1381eff735bc8e89ef84273f3ee1f550 | [
"MIT"
] | 5 | 2016-08-19T23:23:55.000Z | 2020-10-22T18:13:01.000Z | from .core import *
from .geography import *
| 15 | 24 | 0.733333 | 6 | 45 | 5.5 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.177778 | 45 | 2 | 25 | 22.5 | 0.891892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
58752c4c54956c2e50a39d8fffb3417dacdb4695 | 31 | py | Python | tests/test_visualize.py | matthewfeickert/yadage | bf6531a3f430bb409119332398f3fa6edde5e997 | [
"MIT"
] | 14 | 2017-01-09T03:48:51.000Z | 2018-07-03T06:59:11.000Z | tests/test_visualize.py | matthewfeickert/yadage | bf6531a3f430bb409119332398f3fa6edde5e997 | [
"MIT"
] | 52 | 2017-05-11T10:12:54.000Z | 2018-06-24T15:52:31.000Z | tests/test_visualize.py | lukasheinrich/yadage | 314078ec6e015c37e60b30e007bc02694e69e011 | [
"MIT"
] | 5 | 2019-01-29T10:50:30.000Z | 2020-05-12T14:10:30.000Z | def test_visualize():
    pass
| 10.333333 | 21 | 0.677419 | 4 | 31 | 5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.225806 | 31 | 2 | 22 | 15.5 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0.5 | 0 | 0 | 0.5 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
5876c56efbabb0b981dc3f6f5c8a8b0bde130094 | 3,467 | py | Python | tests/functional/test_full_upgrade.py | AKhodus/adcm | 98dbf22af3f1c6afa94505e9acaff0ac4088a602 | [
"Apache-2.0"
] | null | null | null | tests/functional/test_full_upgrade.py | AKhodus/adcm | 98dbf22af3f1c6afa94505e9acaff0ac4088a602 | [
"Apache-2.0"
] | null | null | null | tests/functional/test_full_upgrade.py | AKhodus/adcm | 98dbf22af3f1c6afa94505e9acaff0ac4088a602 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from adcm_client.objects import ADCMClient
from adcm_pytest_plugin.utils import get_data_dir
import allure
def test_full_upgrade_hostprovider_first(sdk_client_fs: ADCMClient):
    """Create cluster and hostprovider with host and components
    and upgrade cluster and host with provider after that
    and check that all was upgraded.
    """
    bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'cluster'))
    sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'upgradable_cluster'))
    cluster = bundle.cluster_create("test")
    service = cluster.service_add(name="zookeeper")
    comp = service.component(name='master')
    hp_bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'hostprovider'))
    sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'upgradable_hostprovider'))
    hostprovider = hp_bundle.provider_create("test")
    host = hostprovider.host_create(fqdn="localhost")
    cluster.host_add(host)
    cluster.hostcomponent_set((host, comp))
    upgr_hp = hostprovider.upgrade(name='upgrade to 2.0')
    upgr_hp.do()
    upgr_cl = cluster.upgrade(name='upgrade to 1.6')
    upgr_cl.do()
    cluster.reread()
    service.reread()
    hostprovider.reread()
    host.reread()
    with allure.step('Check cluster, service, hostprovider, host were upgraded'):
        assert cluster.prototype().version == '1.6'
        assert service.prototype().version == '3.4.11'
        assert hostprovider.prototype().version == '2.0'
        assert host.prototype().version == '00.10'


def test_full_upgrade_cluster_first(sdk_client_fs: ADCMClient):
    """Create cluster and hostprovider with host and components
    and upgrade cluster and host with provider after that
    and check that all was upgraded.
    """
    bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'cluster'))
    sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'upgradable_cluster'))
    cluster = bundle.cluster_create("test")
    service = cluster.service_add(name="zookeeper")
    comp = service.component(name='master')
    hp_bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'hostprovider'))
    sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'upgradable_hostprovider'))
    hostprovider = hp_bundle.provider_create("test")
    host = hostprovider.host_create(fqdn="localhost")
    cluster.host_add(host)
    cluster.hostcomponent_set((host, comp))
    upgr_cl = cluster.upgrade(name='upgrade to 1.6')
    upgr_cl.do()
    upgr_hp = hostprovider.upgrade(name='upgrade to 2.0')
    upgr_hp.do()
    cluster.reread()
    service.reread()
    hostprovider.reread()
    host.reread()
    with allure.step('Check cluster, service, hostprovider, host were upgraded'):
        assert cluster.prototype().version == '1.6'
        assert service.prototype().version == '3.4.11'
        assert hostprovider.prototype().version == '2.0'
        assert host.prototype().version == '00.10'
| 45.025974 | 84 | 0.731468 | 475 | 3,467 | 5.075789 | 0.254737 | 0.037329 | 0.045624 | 0.056408 | 0.767316 | 0.767316 | 0.767316 | 0.767316 | 0.767316 | 0.767316 | 0 | 0.012384 | 0.161523 | 3,467 | 76 | 85 | 45.618421 | 0.816993 | 0.234497 | 0 | 0.90566 | 0 | 0 | 0.148063 | 0.017645 | 0 | 0 | 0 | 0 | 0.150943 | 1 | 0.037736 | false | 0 | 0.056604 | 0 | 0.09434 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
58a18f9db4d7e7e8a1b1b0f6e317a25302f51be1 | 24 | py | Python | acq4/filetypes/__init__.py | ablot/acq4 | ba7cd340d9d0282640adb501d3788f8c0837e4c4 | [
"MIT"
] | null | null | null | acq4/filetypes/__init__.py | ablot/acq4 | ba7cd340d9d0282640adb501d3788f8c0837e4c4 | [
"MIT"
] | null | null | null | acq4/filetypes/__init__.py | ablot/acq4 | ba7cd340d9d0282640adb501d3788f8c0837e4c4 | [
"MIT"
] | null | null | null | from filetypes import *
| 12 | 23 | 0.791667 | 3 | 24 | 6.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 24 | 1 | 24 | 24 | 0.95 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
54491a3931c9c46d24b5b0c9ff16ef8b5d326e34 | 179 | py | Python | repos/spiketoolkit/spiketoolkit/preprocessing/__init__.py | tjd2002/spikeforest2 | 2e393564b858b2995aa2ccccd9bd73065681b5de | [
"Apache-2.0"
] | null | null | null | repos/spiketoolkit/spiketoolkit/preprocessing/__init__.py | tjd2002/spikeforest2 | 2e393564b858b2995aa2ccccd9bd73065681b5de | [
"Apache-2.0"
] | null | null | null | repos/spiketoolkit/spiketoolkit/preprocessing/__init__.py | tjd2002/spikeforest2 | 2e393564b858b2995aa2ccccd9bd73065681b5de | [
"Apache-2.0"
] | null | null | null | from .bandpass_filter import bandpass_filter
from .whiten import whiten
from .common_reference import common_reference
from .resample import resample
from .rectify import rectify
| 29.833333 | 46 | 0.860335 | 24 | 179 | 6.25 | 0.375 | 0.186667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111732 | 179 | 5 | 47 | 35.8 | 0.943396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.2 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
5461991006e5aba0d63313746db42f734392f06a | 3,753 | py | Python | app/views.py | ActuallyZach/in_app_purchase_receipt_verifier | f342809bcc2a16a34de3cccf965f0821a5bd552b | [
"Apache-2.0"
] | 1 | 2021-12-10T09:59:17.000Z | 2021-12-10T09:59:17.000Z | app/views.py | ActuallyZach/in_app_purchase_receipt_verifier | f342809bcc2a16a34de3cccf965f0821a5bd552b | [
"Apache-2.0"
] | null | null | null | app/views.py | ActuallyZach/in_app_purchase_receipt_verifier | f342809bcc2a16a34de3cccf965f0821a5bd552b | [
"Apache-2.0"
] | null | null | null | import json
import logging
import base64
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from lionheart.decorators import render_json
from lionheart.utils import JSONResponse
import requests
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA256
from Crypto import Random
logger = logging.getLogger(__name__)
@csrf_exempt
def verify_receipt(request):
    data = {
        'receipt-data': request.body.strip().decode("utf-8"),
        'password': settings.APP_SPECIFIC_SHARED_SECRET
    }
    response = requests.post(settings.RECEIPT_VERIFICATION_URL, data=json.dumps(data))
    payload = response.json()
    response = JSONResponse(payload)

    # If signing key is available, sign the payload to detect potential tampering.
    if settings.BASE64_ENCODED_SIGNING_KEY:
        key_data = base64.b64decode(settings.BASE64_ENCODED_SIGNING_KEY)
        key = RSA.importKey(key_data)

        data = json.dumps(payload).encode("utf8")
        digest = SHA256.new()
        digest.update(data)

        use_salt = False
        if use_salt:
            rndfile = Random.new()
            salt_data = rndfile.read(64)
            salt = base64.b64encode(salt_data)
            digest.update(salt_data)
            response['X-Salt'] = salt

        signer = PKCS1_v1_5.new(key)
        signature = signer.sign(digest)
        response['X-Signature'] = base64.b64encode(signature)

    return response


def verify_receipt_scum(request):
    data = {
        'receipt-data': request.body.strip().decode("utf-8"),
        'password': settings.APP_SPECIFIC_SHARED_SECRET_SCUM
    }
    response = requests.post(settings.RECEIPT_VERIFICATION_URL_SCUM, data=json.dumps(data))
    payload = response.json()
    response = JSONResponse(payload)

    # If signing key is available, sign the payload to detect potential tampering.
    if settings.BASE64_ENCODED_SIGNING_KEY_SCUM:
        key_data = base64.b64decode(settings.BASE64_ENCODED_SIGNING_KEY_SCUM)
        key = RSA.importKey(key_data)

        data = json.dumps(payload).encode("utf8")
        digest = SHA256.new()
        digest.update(data)

        use_salt = False
        if use_salt:
            rndfile = Random.new()
            salt_data = rndfile.read(64)
            salt = base64.b64encode(salt_data)
            digest.update(salt_data)
            response['X-Salt'] = salt

        signer = PKCS1_v1_5.new(key)
        signature = signer.sign(digest)
        response['X-Signature'] = base64.b64encode(signature)

    return response


def verify_receipt_jelly(request):
    data = {
        'receipt-data': request.body.strip().decode("utf-8"),
        'password': settings.APP_SPECIFIC_SHARED_SECRET_JELLY
    }
    response = requests.post(settings.RECEIPT_VERIFICATION_URL_JELLY, data=json.dumps(data))
    payload = response.json()
    response = JSONResponse(payload)

    # If signing key is available, sign the payload to detect potential tampering.
    if settings.BASE64_ENCODED_SIGNING_KEY_JELLY:
        key_data = base64.b64decode(settings.BASE64_ENCODED_SIGNING_KEY_JELLY)
        key = RSA.importKey(key_data)

        data = json.dumps(payload).encode("utf8")
        digest = SHA256.new()
        digest.update(data)

        use_salt = False
        if use_salt:
            rndfile = Random.new()
            salt_data = rndfile.read(64)
            salt = base64.b64encode(salt_data)
            digest.update(salt_data)
            response['X-Salt'] = salt

        signer = PKCS1_v1_5.new(key)
        signature = signer.sign(digest)
        response['X-Signature'] = base64.b64encode(signature)

    return response
| 30.512195 | 92 | 0.674127 | 452 | 3,753 | 5.409292 | 0.19469 | 0.03681 | 0.031902 | 0.068712 | 0.826585 | 0.826585 | 0.826585 | 0.757873 | 0.757873 | 0.692843 | 0 | 0.030892 | 0.232347 | 3,753 | 122 | 93 | 30.762295 | 0.817772 | 0.061284 | 0 | 0.633333 | 0 | 0 | 0.039227 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0.033333 | 0.177778 | 0 | 0.244444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
54a3556bbae9e927f5a0a9894bcd6dca943767b9 | 32,065 | py | Python | app1.py | trs123s/ModernFarming | 28f99c090ed041486c3c3bbae1054cc9279261bd | [
"MIT"
] | null | null | null | app1.py | trs123s/ModernFarming | 28f99c090ed041486c3c3bbae1054cc9279261bd | [
"MIT"
] | null | null | null | app1.py | trs123s/ModernFarming | 28f99c090ed041486c3c3bbae1054cc9279261bd | [
"MIT"
] | null | null | null | # Importing essential libraries and modules
from flask import Flask, render_template, request, Markup, session, redirect, flash
import numpy as np
import pandas as pd
from utils.disease import disease_dic
from utils.fertilizer import fertilizer_dic
import requests
import config
import pickle
import io
import os
import sqlite3
from PIL import Image
from werkzeug.utils import secure_filename
# ==============================================================================================
# -------------------------LOADING THE TRAINED MODELS -----------------------------------------------
# Loading plant disease classification model
# disease_classes = ['Apple___Apple_scab',
# 'Apple___Black_rot',
# 'Apple___Cedar_apple_rust',
# 'Apple___healthy',
# 'Blueberry___healthy',
# 'Cherry_(including_sour)___Powdery_mildew',
# 'Cherry_(including_sour)___healthy',
# 'Corn_(maize)___Cercospora_leaf_spot Gray_leaf_spot',
# 'Corn_(maize)___Common_rust_',
# 'Corn_(maize)___Northern_Leaf_Blight',
# 'Corn_(maize)___healthy',
# 'Grape___Black_rot',
# 'Grape___Esca_(Black_Measles)',
# 'Grape___Leaf_blight_(Isariopsis_Leaf_Spot)',
# 'Grape___healthy',
# 'Orange___Haunglongbing_(Citrus_greening)',
# 'Peach___Bacterial_spot',
# 'Peach___healthy',
# 'Pepper,_bell___Bacterial_spot',
# 'Pepper,_bell___healthy',
# 'Potato___Early_blight',
# 'Potato___Late_blight',
# 'Potato___healthy',
# 'Raspberry___healthy',
# 'Soybean___healthy',
# 'Squash___Powdery_mildew',
# 'Strawberry___Leaf_scorch',
# 'Strawberry___healthy',
# 'Tomato___Bacterial_spot',
# 'Tomato___Early_blight',
# 'Tomato___Late_blight',
# 'Tomato___Leaf_Mold',
# 'Tomato___Septoria_leaf_spot',
# 'Tomato___Spider_mites Two-spotted_spider_mite',
# 'Tomato___Target_Spot',
# 'Tomato___Tomato_Yellow_Leaf_Curl_Virus',
# 'Tomato___Tomato_mosaic_virus',
# 'Tomato___healthy']
# disease_model_path = 'models/plant_disease_model.pth'
# disease_model = ResNet9(3, len(disease_classes))
# disease_model.load_state_dict(torch.load(
# disease_model_path, map_location=torch.device('cpu')))
# disease_model.eval()
# Loading crop recommendation model
crop_recommendation_model_path = 'models/RandomForest.pkl'
crop_recommendation_model = pickle.load(
    open(crop_recommendation_model_path, 'rb'))
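# Editor's note (hypothetical values): the /crop-predict handler below shows
# the feature order this pickled model expects, so a standalone call would
# look roughly like this:
#
# sample = np.array([[90, 42, 43, 20.88, 82.0, 6.5, 202.9]])
# #                   N   P   K   temp   humid  ph   rainfall
# predicted_crop = crop_recommendation_model.predict(sample)[0]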
# =========================================================================================
# Custom functions for calculations
def weather_fetch(city_name):
    """
    Fetch and returns the temperature and humidity of a city
    :params: city_name
    :return: temperature, humidity
    """
    api_key = config.weather_api_key
    base_url = "http://api.openweathermap.org/data/2.5/weather?"

    complete_url = base_url + "appid=" + api_key + "&q=" + city_name
    response = requests.get(complete_url)
    x = response.json()

    if x["cod"] != "404":
        y = x["main"]

        temperature = round((y["temp"] - 273.15), 2)
        humidity = y["humidity"]
        return temperature, humidity
    else:
        return None
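# Editor's note (illustrative only): weather_fetch() returns a
# (temperature_in_celsius, humidity_percent) tuple, or None when the
# OpenWeatherMap lookup fails with a "404" code, so callers must unpack
# defensively:
#
# fetched = weather_fetch("Mumbai")
# if fetched is not None:
#     temperature, humidity = fetched  # e.g. (28.41, 74)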
# def predict_image(img, model=disease_model):
# """
# Transforms image to tensor and predicts disease label
# :params: image
# :return: prediction (string)
# """
# transform = transforms.Compose([
# transforms.Resize(256),
# transforms.ToTensor(),
# ])
# image = Image.open(io.BytesIO(img))
# img_t = transform(image)
# img_u = torch.unsqueeze(img_t, 0)
# # Get predictions from model
# yb = model(img_u)
# # Pick index with highest probability
# _, preds = torch.max(yb, dim=1)
# prediction = disease_classes[preds[0].item()]
# # Retrieve the class label
# return prediction
# ===============================================================================================
# ------------------------------------ FLASK APP -------------------------------------------------
app = Flask(__name__)
app.secret_key = "Mohit-gupta"
# render home page
@ app.route('/')
def home():
    title = 'Harvestsolutions - Home'
    return render_template('index.html', title=title)


@ app.errorhandler(404)
def page_not_found(e):
    return render_template('404.html'), 404


@ app.route('/login')
def loginscreen():
    title = 'Harvestsolutions - Login'
    return render_template('login.html', title=title)


@ app.route('/register')
def registerscreen():
    title = 'Harvestsolutions - Register'
    return render_template('register.html', title=title)


# render crop form page
@ app.route('/crop')
def crop():
    title = 'Harvestsolutions - Crop'
    return render_template('fuser.html', title=title)


# render crop recommendation form page
@ app.route('/crop-recommend')
def crop_recommend():
    title = 'Harvestsolutions - Crop Recommendation'
    return render_template('crop.html', title=title)


@ app.route('/crop-register')
def crop_register():
    title = 'Harvestsolutions - Crop Register'
    return render_template('crop-register.html', title=title)


# render fertilizer recommendation form page
@ app.route('/fertilizer')
def fertilizer_recommendation():
    title = 'Harvestsolutions - Fertilizer Suggestion'
    return render_template('fertilizer.html', title=title)


@ app.route('/users')
def user_details():
    title = 'Harvestsolutions - User Suggestion'
    return render_template('user.html', title=title)
# render disease prediction input page
# ===============================================================================================
# RENDER PREDICTION PAGES
@ app.route('/login', methods=['POST'])
def checklogin():
    UN = request.form['username']
    _username = request.form['username']
    PW = request.form['password']
    sqlconnection = sqlite3.Connection("login.db")
    cursor = sqlconnection.cursor()
    # Parameterised query rather than string formatting, to avoid SQL injection.
    query1 = "SELECT username, password FROM users WHERE username = ? AND password = ?"
    cursor.execute(query1, (UN, PW))
    rows = cursor.fetchall()
    print(rows)
    if len(rows) == 1:
        session['username'] = _username
        return redirect('/')
    else:
        return redirect("/register")
@app.route('/logout')
def logout():
    if 'username' in session:
        session.pop('username', None)
        return redirect('/')
        # return render_template('logout.html');
    else:
        return '<p>user already logged out</p>'


UPLOAD_FOLDER = './static/upload'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
@ app.route('/register', methods=['GET', 'POST'])
def registerpage():
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        email = request.form['email']
        # <img src="{{url_for('static', filename='Hermes.png')}}" align="middle" />
        if 'file1' not in request.files:
            return 'there is no file1 in form!'
        file1 = request.files['file1']
        # Sanitise the user-supplied filename before building the upload path.
        path = os.path.join(app.config['UPLOAD_FOLDER'],
                            secure_filename(file1.filename))
        file1.save(path)
        print(path)
        sqlconnection = sqlite3.Connection("login.db")
        cursor = sqlconnection.cursor()
        query1 = "INSERT into users (username, password, email, path) values (?,?,?,?)"
        cursor.execute(query1, (username, password, email, path))
        sqlconnection.commit()
        return render_template("login.html")
        # return redirect('/')
    return render_template("Register.html")
# render crop recommendation result page
@ app.route('/crop-predict', methods=['POST'])
def crop_prediction():
    title = 'Harvestsolutions - Crop Recommendation'

    if request.method == 'POST':
        N = int(request.form['nitrogen'])
        P = int(request.form['phosphorous'])
        K = int(request.form['pottasium'])
        ph = float(request.form['ph'])
        rainfall = float(request.form['rainfall'])

        # state = request.form.get("stt")
        city = request.form.get("city")

        if weather_fetch(city) != None:
            temperature, humidity = weather_fetch(city)
            data = np.array([[N, P, K, temperature, humidity, ph, rainfall]])
            my_prediction = crop_recommendation_model.predict(data)
            final_prediction = my_prediction[0]

            return render_template('crop-result.html', prediction=final_prediction, title=title)
        else:
            return render_template('try_again.html', title=title)
@ app.route('/crop-registered', methods=['POST'])
def crop_register_success():
    title = 'Harvestsolutions - Crop Registered'
    msg = "msg"
    if request.method == 'POST':
        try:
            name = request.form["name"]
            phonenumber = request.form["phonenumber"]
            adharnumber = request.form["adharnumber"]
            area = request.form["area"]
            cropg = request.form["cropg"]
            cropr = request.form["cropr"]
            nitrogen = request.form['nitrogen']
            phosphorous = request.form['phosphorous']
            pottasium = request.form['pottasium']
            ph = request.form['ph']
            rainfall = request.form['rainfall']
            state = request.form['state']
            city = request.form['city']
            # city = request.form.get("city")
            # temperature, humidity = weather_fetch(city)
            with sqlite3.connect("fdetail.db") as con:
                cur = con.cursor()
                cur.execute("INSERT into FDetails (name, phonenumber, adharnumber, area, cropg, cropr, nitrogen, phosphorous, pottasium, ph, rainfall, state, city) values (?,?,?,?,?,?,?,?,?,?,?,?,?)", (name, phonenumber, adharnumber, area, cropg, cropr, nitrogen, phosphorous, pottasium, ph, rainfall, state, city))
                con.commit()
                # msg = "Data successfully Added"
        except:
            con.rollback()
            # msg = "We can not add the employee to the list"

        N = int(request.form['nitrogen'])
        P = int(request.form['phosphorous'])
        K = int(request.form['pottasium'])
        ph = float(request.form['ph'])
        rainfall = float(request.form['rainfall'])
        # state = request.form.get("stt")
        city = request.form.get("city")
        if weather_fetch(city) != None:
            temperature, humidity = weather_fetch(city)
            data = np.array([[N, P, K, temperature, humidity, ph, rainfall]])
            my_prediction = crop_recommendation_model.predict(data)
            final_prediction = my_prediction[0]
            return render_template('crop-result.html', prediction=final_prediction, title=title)
        else:
            return render_template('try_again.html', title=title)
# # render users details
@app.route("/view", methods=['POST'])
def view():
title = 'Harvestsolutions - User Recommendation'
if request.method == 'POST':
area = request.form["area"]
cropr = request.form["cropr"]
state = request.form["state"]
city = request.form["city"]
con = sqlite3.connect("fdetail.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
query = "SELECT rowid, * FROM FDetails WHERE"
query = query + " " + "area" + ">=" + str(area) + " AND"
query = query + " " + "cropr" + " LIKE " + "'"
query = query + str(cropr) + "'" + " AND"
query = query + " " + "state" + " LIKE " + "'"
query = query + str(state) + "'" + " AND"
query = query + " " + "city" + " LIKE " + "'"
query = query + str(city) + "'"
print(query)
cur.execute(query)
rows = cur.fetchall()
return render_template("view.html",rows = rows, title=title)
# @ app.route('/user-predict', methods=['POST'])
# def user_prediction():
# title = 'Harvestsolutions - User Recommendation'
# msg = "msg"
# if request.method == 'POST':
# try:
# name = request.form["name"]
# phonenumber = request.form["phonenumber"]
# adharnumber = request.form["adharnumber"]
# area = request.form["area"]
# cropg = request.form["cropg"]
# cropr = request.form["cropr"]
# nitrogen = request.form['nitrogen']
# phosphorous = request.form['phosphorous']
# pottasium = request.form['pottasium']
# ph = request.form['ph']
# rainfall = request.form['rainfall']
# state = request.form['state']
# city = request.form['city']
# # city = request.form.get("city")
# # temperature, humidity = weather_fetch(city)
# with sqlite3.connect("fdetail.db") as con:
# cur = con.cursor()
# cur.execute("INSERT into FDetails (name, phonenumber, adharnumber, area, cropg, cropr, nitrogen, phosphorous, pottasium, ph, rainfall, state, city) values (?,?,?,?,?,?,?,?,?,?,?,?,?)",(name,phonenumber,adharnumber,area,cropg,cropr,nitrogen,phosphorous,pottasium,ph,rainfall,state,city))
# con.commit()
# # msg = "Data successfully Added"
# except:
# con.rollback()
# # msg = "We can not add the employee to the list"
# N = int(request.form['nitrogen'])
# P = int(request.form['phosphorous'])
# K = int(request.form['pottasium'])
# ph = float(request.form['ph'])
# rainfall = float(request.form['rainfall'])
# # state = request.form.get("stt")
# city = request.form.get("city")
# if weather_fetch(city) != None:
# temperature, humidity = weather_fetch(city)
# data = np.array([[N, P, K, temperature, humidity, ph, rainfall]])
# my_prediction = crop_recommendation_model.predict(data)
# final_prediction = my_prediction[0]
# return render_template('user-view.html', prediction=final_prediction, title=title)
# else:
# return render_template('try_again.html', title=title)
# render fertilizer recommendation result page
@ app.route('/fertilizer-predict', methods=['POST'])
def fert_recommend():
    title = 'Harvestsolutions - Fertilizer Suggestion'
    if request.method == 'POST':
        # cropname = request.form["cropname"]
        phonenumber = request.form["phonenumber"]
        adharnumber = request.form["adharnumber"]
        con = sqlite3.connect("fdetail.db")
        con.row_factory = sqlite3.Row
        cur = con.cursor()
        query = "SELECT rowid, * FROM FDetails WHERE"
        query = query + " " + "phonenumber" + " LIKE " + "'"
        query = query + str(phonenumber) + "'" + " AND"
        query = query + " " + "adharnumber" + " LIKE " + "'"
        query = query + str(adharnumber) + "'"
        print(query)
        cur.execute(query)
        rows = cur.fetchall()
        print(rows[0])
        # nitrogen = ''
        # phosphorous = ''
        # pottasium = ''
        # cropname =
        for row in rows:
            print(str(row[0]) + " " + str(row[8]))
            crop_name = row[7]
            nitrogen = row[8]
            phosphorous = row[9]
            pottasium = row[10]
        # nitrogen = request.form["nitrogen"]
        # phosphorous = request.form["phosphorous"]
        # pottasium = request.form["pottasium"]
        # nitrogen = '50'
        # phosphorous = '50'
        # pottasium = '50'
        # crop_name = str(cropname)
        # crop_name = "rice"
        N = int(nitrogen)
        P = int(phosphorous)
        K = int(pottasium)
        # ph = float(request.form['ph'])

        df = pd.read_csv('Data/fertilizer.csv')

        nr = df[df['Crop'] == crop_name]['N'].iloc[0]
        pr = df[df['Crop'] == crop_name]['P'].iloc[0]
        kr = df[df['Crop'] == crop_name]['K'].iloc[0]

        n = nr - N
        p = pr - P
        k = kr - K
        temp = {abs(n): "N", abs(p): "P", abs(k): "K"}
        max_value = temp[max(temp.keys())]
        if max_value == "N":
            if n < 0:
                key = 'NHigh'
            else:
                key = "Nlow"
        elif max_value == "P":
            if p < 0:
                key = 'PHigh'
            else:
                key = "Plow"
        else:
            if k < 0:
                key = 'KHigh'
            else:
                key = "Klow"

        response = Markup(str(fertilizer_dic[key]))

        return render_template('fertilizer-result.html', recommendation=response, rows=rows, title=title)
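# Editor's note, a worked example of the nutrient-gap logic above with
# hypothetical numbers: if fertilizer.csv lists ideal N/P/K of 80/40/40 for the
# crop and the stored soil readings are 50/50/50, then n=30, p=-10, k=-10;
# max(|n|, |p|, |k|) picks "N", and since n > 0 the key becomes "Nlow", i.e. a
# nitrogen-deficit recommendation is looked up in fertilizer_dic.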
# render disease prediction result page
@app.route('/disease-predict', methods=['GET', 'POST'])
def disease_prediction():
    title = 'Harvestsolutions - Disease Detection'

    if request.method == 'POST':
        if 'file' not in request.files:
            return redirect(request.url)
        file = request.files.get('file')
        if not file:
            return render_template('disease.html', title=title)
        try:
            img = file.read()

            prediction = predict_image(img)

            prediction = Markup(str(disease_dic[prediction]))
            return render_template('disease-result.html', prediction=prediction, title=title)
        except:
            pass
    return render_template('disease.html', title=title)
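# Editor's note: predict_image() and the disease model are commented out above,
# so the bare try/except in disease_prediction() currently swallows a NameError
# and falls back to re-rendering the upload form. A hedged stub that would make
# the failure explicit while the model stays disabled (illustrative only):
#
# def predict_image(img):
#     raise NotImplementedError("disease model not loaded")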
# ===============================================================================================
if __name__ == '__main__':
    app.run(debug=True)
| 35.352811 | 308 | 0.547014 | 3,147 | 32,065 | 5.385446 | 0.111535 | 0.075938 | 0.043663 | 0.012981 | 0.896094 | 0.880871 | 0.876151 | 0.876151 | 0.869896 | 0.861872 | 0 | 0.004496 | 0.278528 | 32,065 | 906 | 309 | 35.391832 | 0.728106 | 0.59738 | 0 | 0.310606 | 0 | 0.003788 | 0.166572 | 0.008304 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068182 | false | 0.018939 | 0.049242 | 0.003788 | 0.223485 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
54a9f62fe66f76339b9a08d84a47bd14992a5f1e | 97 | py | Python | morax/data/__init__.py | punidramesh/Hades | 0a7c4c632d23f41a0ee16c3fd4d9a7fc49c8f848 | [
"MIT"
] | 1 | 2021-06-12T11:31:26.000Z | 2021-06-12T11:31:26.000Z | morax/data/__init__.py | abhishekkushwaha4u/morax | 21fe16d7a76cfabfc57151c7a9ef1c6cd68d303e | [
"MIT"
] | null | null | null | morax/data/__init__.py | abhishekkushwaha4u/morax | 21fe16d7a76cfabfc57151c7a9ef1c6cd68d303e | [
"MIT"
] | 1 | 2021-05-26T08:24:31.000Z | 2021-05-26T08:24:31.000Z | import os, sys
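# Put this file's directory and its parent on sys.path so sibling modules can
# be imported without installing the package (a module-level side effect).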
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
sys.path.append("..") | 32.333333 | 60 | 0.742268 | 16 | 97 | 4.25 | 0.5 | 0.205882 | 0.382353 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041237 | 97 | 3 | 61 | 32.333333 | 0.731183 | 0 | 0 | 0 | 0 | 0 | 0.020408 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
49bca0938ff01b4d509de00bf9441fde57d68638 | 28,413 | py | Python | tensorflow/python/kernel_tests/check_ops_test.py | atfkaka/tensorflow | 5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a | [
"Apache-2.0"
] | 101 | 2016-12-03T11:40:52.000Z | 2017-12-23T02:02:03.000Z | tensorflow/python/kernel_tests/check_ops_test.py | atfkaka/tensorflow | 5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a | [
"Apache-2.0"
] | 9 | 2016-12-14T03:27:46.000Z | 2017-09-13T02:29:07.000Z | tensorflow/python/kernel_tests/check_ops_test.py | atfkaka/tensorflow | 5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a | [
"Apache-2.0"
] | 47 | 2016-12-04T12:37:24.000Z | 2018-01-14T18:13:07.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.check_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
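# Most tests below follow the same graph-mode pattern: attach an assert op to
# an identity op via tf.control_dependencies, then evaluate the identity op so
# the assertion actually runs inside the test session.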
class AssertProperIterableTest(tf.test.TestCase):
def test_single_tensor_raises(self):
tensor = tf.constant(1)
with self.assertRaisesRegexp(TypeError, "proper"):
tf.assert_proper_iterable(tensor)
def test_single_sparse_tensor_raises(self):
ten = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], shape=[3, 4])
with self.assertRaisesRegexp(TypeError, "proper"):
tf.assert_proper_iterable(ten)
def test_single_ndarray_raises(self):
array = np.array([1, 2, 3])
with self.assertRaisesRegexp(TypeError, "proper"):
tf.assert_proper_iterable(array)
def test_single_string_raises(self):
mystr = "hello"
with self.assertRaisesRegexp(TypeError, "proper"):
tf.assert_proper_iterable(mystr)
def test_non_iterable_object_raises(self):
non_iterable = 1234
with self.assertRaisesRegexp(TypeError, "to be iterable"):
tf.assert_proper_iterable(non_iterable)
def test_list_does_not_raise(self):
list_of_stuff = [tf.constant([11, 22]), tf.constant([1, 2])]
tf.assert_proper_iterable(list_of_stuff)
def test_generator_does_not_raise(self):
generator_of_stuff = (tf.constant([11, 22]), tf.constant([1, 2]))
tf.assert_proper_iterable(generator_of_stuff)
class AssertEqualTest(tf.test.TestCase):
def test_doesnt_raise_when_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
with tf.control_dependencies([tf.assert_equal(small, small)]):
out = tf.identity(small)
out.eval()
def test_raises_when_greater(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 4], name="big")
with tf.control_dependencies(
[tf.assert_equal(big, small, message="fail")]):
out = tf.identity(small)
with self.assertRaisesOpError("fail.*big.*small"):
out.eval()
def test_raises_when_less(self):
with self.test_session():
small = tf.constant([3, 1], name="small")
big = tf.constant([4, 2], name="big")
with tf.control_dependencies([tf.assert_equal(small, big)]):
out = tf.identity(small)
with self.assertRaisesOpError("small.*big"):
out.eval()
def test_doesnt_raise_when_equal_and_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
small_2 = tf.constant([1, 2], name="small_2")
with tf.control_dependencies([tf.assert_equal(small, small_2)]):
out = tf.identity(small)
out.eval()
def test_raises_when_equal_but_non_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 1, 1], name="small")
small_2 = tf.constant([1, 1], name="small_2")
with self.assertRaisesRegexp(ValueError, "must be"):
with tf.control_dependencies([tf.assert_equal(small, small_2)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = tf.constant([])
curly = tf.constant([])
with tf.control_dependencies([tf.assert_equal(larry, curly)]):
out = tf.identity(larry)
out.eval()
class AssertLessTest(tf.test.TestCase):
def test_raises_when_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
with tf.control_dependencies(
[tf.assert_less(small, small, message="fail")]):
out = tf.identity(small)
with self.assertRaisesOpError("fail.*small.*small"):
out.eval()
def test_raises_when_greater(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 4], name="big")
with tf.control_dependencies([tf.assert_less(big, small)]):
out = tf.identity(small)
with self.assertRaisesOpError("big.*small"):
out.eval()
def test_doesnt_raise_when_less(self):
with self.test_session():
small = tf.constant([3, 1], name="small")
big = tf.constant([4, 2], name="big")
with tf.control_dependencies([tf.assert_less(small, big)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_less_and_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1], name="small")
big = tf.constant([3, 2], name="big")
with tf.control_dependencies([tf.assert_less(small, big)]):
out = tf.identity(small)
out.eval()
def test_raises_when_less_but_non_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 1, 1], name="small")
big = tf.constant([3, 2], name="big")
with self.assertRaisesRegexp(ValueError, "must be"):
with tf.control_dependencies([tf.assert_less(small, big)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = tf.constant([])
curly = tf.constant([])
with tf.control_dependencies([tf.assert_less(larry, curly)]):
out = tf.identity(larry)
out.eval()
class AssertLessEqualTest(tf.test.TestCase):
def test_doesnt_raise_when_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
with tf.control_dependencies([tf.assert_less_equal(small, small)]):
out = tf.identity(small)
out.eval()
def test_raises_when_greater(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 4], name="big")
with tf.control_dependencies(
[tf.assert_less_equal(big, small, message="fail")]):
out = tf.identity(small)
with self.assertRaisesOpError("fail.*big.*small"):
out.eval()
def test_doesnt_raise_when_less_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 2], name="big")
with tf.control_dependencies([tf.assert_less_equal(small, big)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_less_equal_and_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1], name="small")
big = tf.constant([3, 1], name="big")
with tf.control_dependencies([tf.assert_less_equal(small, big)]):
out = tf.identity(small)
out.eval()
def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 1, 1], name="small")
big = tf.constant([3, 1], name="big")
with self.assertRaisesRegexp(ValueError, "must be"):
with tf.control_dependencies([tf.assert_less_equal(small, big)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = tf.constant([])
curly = tf.constant([])
with tf.control_dependencies([tf.assert_less_equal(larry, curly)]):
out = tf.identity(larry)
out.eval()
class AssertGreaterTest(tf.test.TestCase):
def test_raises_when_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
with tf.control_dependencies(
[tf.assert_greater(small, small, message="fail")]):
out = tf.identity(small)
with self.assertRaisesOpError("fail.*small.*small"):
out.eval()
def test_raises_when_less(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 4], name="big")
with tf.control_dependencies([tf.assert_greater(small, big)]):
out = tf.identity(big)
with self.assertRaisesOpError("small.*big"):
out.eval()
def test_doesnt_raise_when_greater(self):
with self.test_session():
small = tf.constant([3, 1], name="small")
big = tf.constant([4, 2], name="big")
with tf.control_dependencies([tf.assert_greater(big, small)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_greater_and_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1], name="small")
big = tf.constant([3, 2], name="big")
with tf.control_dependencies([tf.assert_greater(big, small)]):
out = tf.identity(small)
out.eval()
def test_raises_when_greater_but_non_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 1, 1], name="small")
big = tf.constant([3, 2], name="big")
with self.assertRaisesRegexp(ValueError, "must be"):
with tf.control_dependencies([tf.assert_greater(big, small)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = tf.constant([])
curly = tf.constant([])
with tf.control_dependencies([tf.assert_greater(larry, curly)]):
out = tf.identity(larry)
out.eval()
class AssertGreaterEqualTest(tf.test.TestCase):
def test_doesnt_raise_when_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
with tf.control_dependencies([tf.assert_greater_equal(small, small)]):
out = tf.identity(small)
out.eval()
def test_raises_when_less(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 4], name="big")
with tf.control_dependencies(
[tf.assert_greater_equal(small, big, message="fail")]):
out = tf.identity(small)
with self.assertRaisesOpError("fail.*small.*big"):
out.eval()
def test_doesnt_raise_when_greater_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 2], name="big")
with tf.control_dependencies([tf.assert_greater_equal(big, small)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_greater_equal_and_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1], name="small")
big = tf.constant([3, 1], name="big")
with tf.control_dependencies([tf.assert_greater_equal(big, small)]):
out = tf.identity(small)
out.eval()
def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 1, 1], name="big")
big = tf.constant([3, 1], name="small")
with self.assertRaisesRegexp(ValueError, "Dimensions must be equal"):
with tf.control_dependencies([tf.assert_greater_equal(big, small)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = tf.constant([])
curly = tf.constant([])
with tf.control_dependencies([tf.assert_greater_equal(larry, curly)]):
out = tf.identity(larry)
out.eval()
class AssertNegativeTest(tf.test.TestCase):
def test_doesnt_raise_when_negative(self):
with self.test_session():
frank = tf.constant([-1, -2], name="frank")
with tf.control_dependencies([tf.assert_negative(frank)]):
out = tf.identity(frank)
out.eval()
def test_raises_when_positive(self):
with self.test_session():
doug = tf.constant([1, 2], name="doug")
with tf.control_dependencies([tf.assert_negative(doug, message="fail")]):
out = tf.identity(doug)
with self.assertRaisesOpError("fail.*doug"):
out.eval()
def test_raises_when_zero(self):
with self.test_session():
claire = tf.constant([0], name="claire")
with tf.control_dependencies([tf.assert_negative(claire)]):
out = tf.identity(claire)
with self.assertRaisesOpError("claire"):
out.eval()
def test_empty_tensor_doesnt_raise(self):
# A tensor is negative when it satisfies:
# For every element x_i in x, x_i < 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
with self.test_session():
empty = tf.constant([], name="empty")
with tf.control_dependencies([tf.assert_negative(empty)]):
out = tf.identity(empty)
out.eval()
class AssertPositiveTest(tf.test.TestCase):
def test_raises_when_negative(self):
with self.test_session():
freddie = tf.constant([-1, -2], name="freddie")
with tf.control_dependencies(
[tf.assert_positive(freddie, message="fail")]):
out = tf.identity(freddie)
with self.assertRaisesOpError("fail.*freddie"):
out.eval()
def test_doesnt_raise_when_positive(self):
with self.test_session():
remmy = tf.constant([1, 2], name="remmy")
with tf.control_dependencies([tf.assert_positive(remmy)]):
out = tf.identity(remmy)
out.eval()
def test_raises_when_zero(self):
with self.test_session():
meechum = tf.constant([0], name="meechum")
with tf.control_dependencies([tf.assert_positive(meechum)]):
out = tf.identity(meechum)
with self.assertRaisesOpError("meechum"):
out.eval()
def test_empty_tensor_doesnt_raise(self):
# A tensor is positive when it satisfies:
# For every element x_i in x, x_i > 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
with self.test_session():
empty = tf.constant([], name="empty")
with tf.control_dependencies([tf.assert_positive(empty)]):
out = tf.identity(empty)
out.eval()
class AssertRankTest(tf.test.TestCase):
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
tensor = tf.constant(1, name="my_tensor")
desired_rank = 1
with self.assertRaisesRegexp(
ValueError, "fail.*my_tensor.*must have rank 1"):
with tf.control_dependencies(
[tf.assert_rank(tensor, desired_rank, message="fail")]):
tf.identity(tensor).eval()
def test_rank_zero_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 1
with tf.control_dependencies(
[tf.assert_rank(tensor, desired_rank, message="fail")]):
with self.assertRaisesOpError("fail.*my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: 0})
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
tensor = tf.constant(1, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval()
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: 0})
def test_rank_one_tensor_raises_if_rank_too_large_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 0
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_raises_if_rank_too_large_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 2
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 2
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_raises_if_rank_is_not_scalar_static(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
with self.assertRaisesRegexp(ValueError, "Rank must be a scalar"):
tf.assert_rank(tensor, np.array([], dtype=np.int32))
def test_raises_if_rank_is_not_scalar_dynamic(self):
with self.test_session():
tensor = tf.constant([1, 2], dtype=tf.float32, name="my_tensor")
rank_tensor = tf.placeholder(tf.int32, name="rank_tensor")
with self.assertRaisesOpError("Rank must be a scalar"):
with tf.control_dependencies([tf.assert_rank(tensor, rank_tensor)]):
tf.identity(tensor).eval(feed_dict={rank_tensor: [1, 2]})
def test_raises_if_rank_is_not_integer_static(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
tf.assert_rank(tensor, .5)
def test_raises_if_rank_is_not_integer_dynamic(self):
with self.test_session():
tensor = tf.constant([1, 2], dtype=tf.float32, name="my_tensor")
rank_tensor = tf.placeholder(tf.float32, name="rank_tensor")
with self.assertRaisesRegexp(TypeError,
"must be of type <dtype: 'int32'>"):
with tf.control_dependencies([tf.assert_rank(tensor, rank_tensor)]):
tf.identity(tensor).eval(feed_dict={rank_tensor: .5})
class AssertRankAtLeastTest(tf.test.TestCase):
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
tensor = tf.constant(1, name="my_tensor")
desired_rank = 1
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank at least 1"):
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_zero_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: 0})
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
tensor = tf.constant(1, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: 0})
  def test_rank_one_ten_doesnt_raise_if_rank_too_large_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_ten_doesnt_raise_if_rank_too_large_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 2
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 2
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
class AssertNonNegativeTest(tf.test.TestCase):
def test_raises_when_negative(self):
with self.test_session():
zoe = tf.constant([-1, -2], name="zoe")
with tf.control_dependencies([tf.assert_non_negative(zoe)]):
out = tf.identity(zoe)
with self.assertRaisesOpError("zoe"):
out.eval()
def test_doesnt_raise_when_zero_and_positive(self):
with self.test_session():
lucas = tf.constant([0, 2], name="lucas")
with tf.control_dependencies([tf.assert_non_negative(lucas)]):
out = tf.identity(lucas)
out.eval()
def test_empty_tensor_doesnt_raise(self):
# A tensor is non-negative when it satisfies:
# For every element x_i in x, x_i >= 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
with self.test_session():
empty = tf.constant([], name="empty")
with tf.control_dependencies([tf.assert_non_negative(empty)]):
out = tf.identity(empty)
out.eval()
class AssertNonPositiveTest(tf.test.TestCase):
def test_doesnt_raise_when_zero_and_negative(self):
with self.test_session():
tom = tf.constant([0, -2], name="tom")
with tf.control_dependencies([tf.assert_non_positive(tom)]):
out = tf.identity(tom)
out.eval()
def test_raises_when_positive(self):
with self.test_session():
rachel = tf.constant([0, 2], name="rachel")
with tf.control_dependencies([tf.assert_non_positive(rachel)]):
out = tf.identity(rachel)
with self.assertRaisesOpError("rachel"):
out.eval()
def test_empty_tensor_doesnt_raise(self):
# A tensor is non-positive when it satisfies:
# For every element x_i in x, x_i <= 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
with self.test_session():
empty = tf.constant([], name="empty")
with tf.control_dependencies([tf.assert_non_positive(empty)]):
out = tf.identity(empty)
out.eval()
class AssertIntegerTest(tf.test.TestCase):
def test_doesnt_raise_when_integer(self):
with self.test_session():
integers = tf.constant([1, 2], name="integers")
with tf.control_dependencies([tf.assert_integer(integers)]):
out = tf.identity(integers)
out.eval()
def test_raises_when_float(self):
with self.test_session():
floats = tf.constant([1.0, 2.0], name="floats")
with self.assertRaisesRegexp(TypeError, "Expected.*integer"):
tf.assert_integer(floats)
class IsStrictlyIncreasingTest(tf.test.TestCase):
def test_constant_tensor_is_not_strictly_increasing(self):
with self.test_session():
self.assertFalse(tf.is_strictly_increasing([1, 1, 1]).eval())
def test_decreasing_tensor_is_not_strictly_increasing(self):
with self.test_session():
self.assertFalse(tf.is_strictly_increasing([1, 0, -1]).eval())
def test_2d_decreasing_tensor_is_not_strictly_increasing(self):
with self.test_session():
self.assertFalse(tf.is_strictly_increasing([[1, 3], [2, 4]]).eval())
def test_increasing_tensor_is_increasing(self):
with self.test_session():
self.assertTrue(tf.is_strictly_increasing([1, 2, 3]).eval())
def test_increasing_rank_two_tensor(self):
with self.test_session():
self.assertTrue(tf.is_strictly_increasing([[-1, 2], [3, 4]]).eval())
def test_tensor_with_one_element_is_strictly_increasing(self):
with self.test_session():
self.assertTrue(tf.is_strictly_increasing([1]).eval())
def test_empty_tensor_is_strictly_increasing(self):
with self.test_session():
self.assertTrue(tf.is_strictly_increasing([]).eval())
class IsNonDecreasingTest(tf.test.TestCase):
def test_constant_tensor_is_non_decreasing(self):
with self.test_session():
self.assertTrue(tf.is_non_decreasing([1, 1, 1]).eval())
def test_decreasing_tensor_is_not_non_decreasing(self):
with self.test_session():
self.assertFalse(tf.is_non_decreasing([3, 2, 1]).eval())
def test_2d_decreasing_tensor_is_not_non_decreasing(self):
with self.test_session():
self.assertFalse(tf.is_non_decreasing([[1, 3], [2, 4]]).eval())
def test_increasing_rank_one_tensor_is_non_decreasing(self):
with self.test_session():
self.assertTrue(tf.is_non_decreasing([1, 2, 3]).eval())
def test_increasing_rank_two_tensor(self):
with self.test_session():
self.assertTrue(tf.is_non_decreasing([[-1, 2], [3, 3]]).eval())
def test_tensor_with_one_element_is_non_decreasing(self):
with self.test_session():
self.assertTrue(tf.is_non_decreasing([1]).eval())
def test_empty_tensor_is_non_decreasing(self):
with self.test_session():
self.assertTrue(tf.is_non_decreasing([]).eval())
if __name__ == "__main__":
tf.test.main()
| 38.395946 | 80 | 0.668567 | 3,806 | 28,413 | 4.728324 | 0.058592 | 0.054679 | 0.056012 | 0.088686 | 0.869415 | 0.850245 | 0.840687 | 0.809624 | 0.779784 | 0.749667 | 0 | 0.013167 | 0.200788 | 28,413 | 739 | 81 | 38.447903 | 0.779329 | 0.04966 | 0 | 0.695205 | 0 | 0 | 0.041606 | 0.001557 | 0 | 0 | 0 | 0 | 0.244863 | 1 | 0.155822 | false | 0 | 0.008562 | 0 | 0.190068 | 0.001712 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
49eb92c54d2dec5992906faf0003895412a6f8e2 | 11,496 | py | Python | menpo/transform/test/h_align_test.py | yuxiang-zhou/menpo | 01deaf3808cbe7a3d9db5542ac9d9f53cd81743a | [
"BSD-3-Clause"
] | 1 | 2021-04-20T00:36:57.000Z | 2021-04-20T00:36:57.000Z | menpo/transform/test/h_align_test.py | yuxiang-zhou/menpo | 01deaf3808cbe7a3d9db5542ac9d9f53cd81743a | [
"BSD-3-Clause"
] | 1 | 2019-03-09T16:01:46.000Z | 2019-03-09T16:01:46.000Z | menpo/transform/test/h_align_test.py | yuxiang-zhou/menpo | 01deaf3808cbe7a3d9db5542ac9d9f53cd81743a | [
"BSD-3-Clause"
] | 1 | 2020-05-01T09:55:57.000Z | 2020-05-01T09:55:57.000Z | import numpy as np
from numpy.testing import assert_allclose, raises
from menpo.shape import PointCloud
from menpo.transform import (Affine, AlignmentAffine,
Similarity, AlignmentSimilarity,
Rotation, AlignmentRotation,
Translation, AlignmentTranslation,
UniformScale, AlignmentUniformScale)
# TODO check composition works correctly on all alignment methods
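# Every alignment test below follows one recipe: build a known transform,
# apply it to a source PointCloud to obtain a target, estimate the alignment
# from (source, target), and compare the recovered h_matrix (or points)
# against the ground truth.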
# AFFINE
def test_align_2d_affine():
linear_component = np.array([[1, -6],
[-3, 2]])
translation_component = np.array([7, -8])
h_matrix = np.eye(3, 3)
h_matrix[:-1, :-1] = linear_component
h_matrix[:-1, -1] = translation_component
affine = Affine(h_matrix)
source = PointCloud(np.array([[0, 1],
[1, 1],
[-1, -5],
[3, -5]]))
target = affine.apply(source)
# estimate the transform from source and target
estimate = AlignmentAffine(source, target)
# check the estimates is correct
assert_allclose(affine.h_matrix, estimate.h_matrix)
def test_align_2d_affine_compose_target():
source = PointCloud(np.array([[0, 1],
[1, 1],
[-1, -5],
[3, -5]]))
target = UniformScale(2.0, n_dims=2).apply(source)
original_estimate = AlignmentAffine(source, target)
new_estimate = original_estimate.copy()
new_estimate.compose_after_from_vector_inplace(
np.array([0, 0, 0, 0, 1, 1.]))
estimate_target = new_estimate.target
correct_target = original_estimate.compose_after(
Translation([1, 1.])).apply(source)
assert_allclose(estimate_target.points, correct_target.points)
def test_align_2d_affine_set_target():
linear_component = np.array([[1, -6],
[-3, 2]])
translation_component = np.array([7, -8])
h_matrix = np.eye(3, 3)
h_matrix[:-1, :-1] = linear_component
h_matrix[:-1, -1] = translation_component
affine = Affine(h_matrix)
source = PointCloud(np.array([[0, 1],
[1, 1],
[-1, -5],
[3, -5]]))
target = affine.apply(source)
# estimate the transform from source and source
estimate = AlignmentAffine(source, source)
# and set the target
estimate.set_target(target)
# check the estimates is correct
assert_allclose(affine.h_matrix, estimate.h_matrix)
def test_align_2d_affine_as_non_alignment():
linear_component = np.array([[1, -6],
[-3, 2]])
translation_component = np.array([7, -8])
h_matrix = np.eye(3, 3)
h_matrix[:-1, :-1] = linear_component
h_matrix[:-1, -1] = translation_component
affine = Affine(h_matrix)
source = PointCloud(np.array([[0, 1],
[1, 1],
[-1, -5],
[3, -5]]))
target = affine.apply(source)
# estimate the transform from source and source
estimate = AlignmentAffine(source, source)
# and set the h_matrix
non_align = estimate.as_non_alignment()
# check the estimates is correct
assert_allclose(non_align.h_matrix, estimate.h_matrix)
assert(type(non_align) == Affine)
# TODO check from_vector, from_vector_inplace works correctly
# SIMILARITY
def test_align_2d_similarity():
linear_component = np.array([[2, -6],
[6, 2]])
translation_component = np.array([7, -8])
h_matrix = np.eye(3, 3)
h_matrix[:-1, :-1] = linear_component
h_matrix[:-1, -1] = translation_component
similarity = Similarity(h_matrix)
source = PointCloud(np.array([[0, 1],
[1, 1],
[-1, -5],
[3, -5]]))
target = similarity.apply(source)
# estimate the transform from source and target
estimate = AlignmentSimilarity(source, target)
# check the estimates is correct
assert_allclose(similarity.h_matrix,
estimate.h_matrix)
def test_align_2d_similarity_set_target():
linear_component = np.array([[2, -6],
[6, 2]])
translation_component = np.array([7, -8])
h_matrix = np.eye(3, 3)
h_matrix[:-1, :-1] = linear_component
h_matrix[:-1, -1] = translation_component
similarity = Similarity(h_matrix)
source = PointCloud(np.array([[0, 1],
[1, 1],
[-1, -5],
[3, -5]]))
target = similarity.apply(source)
# estimate the transform from source to source
estimate = AlignmentSimilarity(source, source, allow_mirror=True)
# and set the target
estimate.set_target(target)
# check the estimates is correct
assert_allclose(similarity.h_matrix,
estimate.h_matrix)
# ROTATION
def test_align_2d_rotation():
rotation_matrix = np.array([[0, 1],
[-1, 0]])
rotation = Rotation(rotation_matrix)
source = PointCloud(np.array([[0, 1],
[1, 1],
[-1, -5],
[3, -5]]))
target = rotation.apply(source)
# estimate the transform from source and target
estimate = AlignmentRotation(source, target)
# check the estimates is correct
assert_allclose(rotation.h_matrix,
estimate.h_matrix, atol=1e-14)
def test_align_2d_rotation_allow_mirror():
s_init = PointCloud(np.array([[-1., 1.], [1., 1.], [1., -1.], [-1., -1.]]))
s_trg = PointCloud(np.array([[1., -1.], [1., 1.], [-1., 1.], [-1., -1.]]))
# estimate the transform from source and target with mirroring allowed
tr = AlignmentRotation(s_init, s_trg, allow_mirror=True)
s_final = tr.apply(s_init)
assert_allclose(s_final.points, s_trg.points, atol=1e-14)
# estimate the transform from source and target with mirroring allowed
tr = AlignmentRotation(s_init, s_trg, allow_mirror=False)
s_final = tr.apply(s_init)
assert_allclose(s_final.points, np.array([[-1., -1.], [-1., 1.], [1., 1.],
[1., -1.]]), atol=1e-14)
def test_align_2d_rotation_set_target():
rotation_matrix = np.array([[0, 1],
[-1, 0]])
rotation = Rotation(rotation_matrix)
source = PointCloud(np.array([[0, 1],
[1, 1],
[-1, -5],
[3, -5]]))
target = rotation.apply(source)
# estimate the transform from source and source
estimate = AlignmentRotation(source, source)
# and set the target
estimate.set_target(target)
# check the estimates is correct
assert_allclose(rotation.h_matrix,
estimate.h_matrix, atol=1e-14)
def test_align_2d_rotation_set_rotation_matrix():
rotation_matrix = np.array([[0, 1],
[-1, 0]])
rotation = Rotation(rotation_matrix)
source = PointCloud(np.array([[0, 1],
[1, 1],
[-1, -5],
[3, -5]]))
target = rotation.apply(source)
# estimate the transform from source and source
estimate = AlignmentRotation(source, source)
# and set the target
estimate.set_rotation_matrix(rotation.rotation_matrix)
# check the estimates is correct
assert_allclose(target.points,
estimate.target.points, atol=1e-14)
# UNIFORM SCALE
def test_align_2d_uniform_scale():
scale = UniformScale(2.5, 2)
source = PointCloud(np.array([[0, 1],
[1, 1],
[-1, -5],
[3, -5]]))
target = scale.apply(source)
# estimate the transform from source and target
estimate = AlignmentUniformScale(source, target)
# check the estimates is correct
assert_allclose(scale.h_matrix, estimate.h_matrix)
def test_align_2d_uniform_scale_set_target():
scale = UniformScale(2.5, 2)
source = PointCloud(np.array([[0, 1],
[1, 1],
[-1, -5],
[3, -5]]))
target = scale.apply(source)
# estimate the transform from source and source
estimate = AlignmentUniformScale(source, source)
# and set the target
estimate.set_target(target)
# check the estimates is correct
assert_allclose(scale.h_matrix, estimate.h_matrix)
# TRANSLATION
def test_align_2d_translation():
t_vec = np.array([1, 2])
translation = Translation(t_vec)
source = PointCloud(np.array([[0, 1],
[1, 1],
[-1, -5],
[3, -5]]))
target = translation.apply(source)
# estimate the transform from source and target
estimate = AlignmentTranslation(source, target)
# check the estimates is correct
assert_allclose(translation.h_matrix,
estimate.h_matrix)
def test_align_2d_translation_set_target():
t_vec = np.array([1, 2])
translation = Translation(t_vec)
source = PointCloud(np.array([[0, 1],
[1, 1],
[-1, -5],
[3, -5]]))
target = translation.apply(source)
# estimate the transform from source to source..
estimate = AlignmentTranslation(source, source)
# and change the target.
estimate.set_target(target)
# check the estimates is correct
assert_allclose(translation.h_matrix,
estimate.h_matrix)
def test_align_2d_translation_from_vector_inplace():
t_vec = np.array([1, 2])
translation = Translation(t_vec)
source = PointCloud(np.array([[0, 1],
[1, 1],
[-1, -5],
[3, -5]]))
target = translation.apply(source)
# estimate the transform from source to source..
estimate = AlignmentTranslation(source, source)
# and update from_vector
estimate._from_vector_inplace(t_vec)
# check the estimates is correct
assert_allclose(target.points,
estimate.target.points)
def test_align_2d_translation_from_vector():
t_vec = np.array([1, 2])
translation = Translation(t_vec)
source = PointCloud(np.array([[0, 1],
[1, 1],
[-1, -5],
[3, -5]]))
target = translation.apply(source)
# estimate the transform from source to source..
estimate = AlignmentTranslation(source, source)
# and update from_vector
new_est = estimate.from_vector(t_vec)
# check the original is unchanged
assert_allclose(estimate.source.points, source.points)
assert_allclose(estimate.target.points, source.points)
# check the new estimate has the source and target correct
assert_allclose(new_est.source.points, source.points)
assert_allclose(new_est.target.points, target.points)
| 37.203883 | 79 | 0.560282 | 1,293 | 11,496 | 4.802011 | 0.075019 | 0.026091 | 0.023192 | 0.019327 | 0.799163 | 0.76518 | 0.739733 | 0.726043 | 0.726043 | 0.710904 | 0 | 0.035251 | 0.32881 | 11,496 | 308 | 80 | 37.324675 | 0.76944 | 0.142397 | 0 | 0.730594 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003247 | 0.100457 | 1 | 0.073059 | false | 0 | 0.018265 | 0 | 0.091324 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
49ebde28b5a79e5a798874a3c9f1bbe9667d7547 | 63,588 | py | Python | util.py | qcwthu/Continual_Fewshot_Relation_Learning | 9d94a9ddc9de6300deec1d5bd434cda0a7a3f1eb | [
"MIT"
] | null | null | null | util.py | qcwthu/Continual_Fewshot_Relation_Learning | 9d94a9ddc9de6300deec1d5bd434cda0a7a3f1eb | [
"MIT"
] | null | null | null | util.py | qcwthu/Continual_Fewshot_Relation_Learning | 9d94a9ddc9de6300deec1d5bd434cda0a7a3f1eb | [
"MIT"
] | null | null | null | import sys
import os
import random
import torch
import numpy as np
import re
import json
from collections import defaultdict
import hashlib
def set_seed(config, seed):
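    # Seed every RNG in play (python's random, numpy, torch, and all CUDA
    # devices when GPU use is enabled) and pin cuDNN to deterministic kernels
    # so runs are reproducible.
    # Example call (hypothetical config): set_seed({'n_gpu': 1, 'use_gpu': True}, 42)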
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if config['n_gpu'] > 0 and torch.cuda.is_available() and config['use_gpu']:
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def readtrain(filename):
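    # Read tab-separated training samples (9 fields per line, relation id in
    # field 0) and group the raw lines into a dict keyed by relation id.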
f = open(filename,'r')
res = {}
while True:
line = f.readline().strip()
if not line:
break
content = line.split("\t")
#if len(content) != 7:
if len(content) != 9:
print("error!!!")
            sys.exit(-1)
else:
rel = int(content[0])
if rel not in res:
res[rel] = []
res[rel].append(line)
else:
res[rel].append(line)
f.close()
return res
def transtonpy(data,tokenizer):
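    # Tokenize each sample's sentence (field 2), convert tokens to ids,
    # truncate to 128 tokens, append the clipped length, and return the
    # samples packed into a numpy array.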
for sample in data:
tokens = tokenizer.tokenize(sample[2])
max_length = 128
length = min(len(tokens), max_length)
tokens = tokenizer.convert_tokens_to_ids(tokens, unk_id=tokenizer.vocab['[UNK]'])
if (len(tokens) > max_length):
tokens = tokens[:max_length]
sample[2] = tokens
sample.append(length)
return np.asarray(data)
def cotinualfewshotpreprocess(config,tokenizer):
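    # Build the continual few-shot benchmark: sample `basenum` base relations
    # (with more data each), spread every other relation over `howmanyways`
    # tasks of `waysnum` relations, then write the train/test txt and npy
    # files plus the per-relation task labels (rel_cluster_label_*.npy).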
rel_index = np.load("data/fewrel/rel_index.npy")
#rel_cluster_label = np.load("rel_cluster_label.npy")
alltraindata = readtrain("data/fewrel/train_all.txt")
print(len(alltraindata))
allrel = []
for i in alltraindata.keys():
allrel.append(i)
print(i, "\t", len(alltraindata[i]))
print(len(allrel))
alltestdata = readtrain("data/fewrel/test.txt")
print(len(alltestdata))
for i in alltestdata.keys():
print(i, "\t", len(alltestdata[i]))
samplenum = 1
basenum = 10
datanumforbaserel = 100
allnum = len(allrel)
waysnum = 10
howmanyways = (allnum - basenum) // waysnum
shotnum = 100
filenum = 9999
numforeverytestrel = 100
for i in range(samplenum):
####sample basenum base relations
sample_list = random.sample(allrel, basenum)
#sample_list = [6, 12, 14, 17, 21, 25, 49, 64, 65, 78]
print(sample_list)
####
tousetraindata = []
for k in alltraindata.keys():
if k in sample_list:
trainsamplelist = random.sample(alltraindata[k], datanumforbaserel)
#trainsamplelist = random.sample(alltraindata[k], len(alltraindata[k]))
tousetraindata.extend(trainsamplelist)
else:
trainsamplelist = random.sample(alltraindata[k], shotnum)
#trainsamplelist = random.sample(alltraindata[k], len(alltraindata[k]))
tousetraindata.extend(trainsamplelist)
random.shuffle(tousetraindata) ###train
print(len(tousetraindata))
tousetestdata = []
for k in alltestdata.keys():
testsamplelist = random.sample(alltestdata[k], numforeverytestrel)
#testsamplelist = random.sample(alltestdata[k], len(alltestdata[k]))
tousetestdata.extend(testsamplelist)
random.shuffle(tousetestdata)
print(len(tousetestdata))
print(rel_index)
print(sample_list)
newlabeltasknum = []
for k in range(allnum):
if rel_index[k] in sample_list:
newlabeltasknum.append(howmanyways)
else:
newlabeltasknum.append(-1)
print(newlabeltasknum)
###other howmanyways tasks
temptaskindex = []
for k in range(howmanyways):
for j in range(waysnum):
temptaskindex.append(k)
random.shuffle(temptaskindex)
realindex = 0
for k in range(allnum):
if newlabeltasknum[k] != -1:
continue
else:
newlabeltasknum[k] = temptaskindex[realindex]
realindex += 1
print(realindex)
print(newlabeltasknum)
newname = "data/fewrel/CFRLdatatest_10_100_10_"+str(filenum)+"/rel_cluster_label_" + str(i) + ".npy"
np.save(newname, np.asarray(newlabeltasknum))
traintxtname = "data/fewrel/CFRLdatatest_10_100_10_"+str(filenum)+"/train_" + str(i) + ".txt"
fw = open(traintxtname, "w")
for line in tousetraindata:
fw.write(line + "\n")
fw.close()
testtxtname = "data/fewrel/CFRLdatatest_10_100_10_"+str(filenum)+"/test_" + str(i) + ".txt"
fw = open(testtxtname, "w")
for line in tousetestdata:
fw.write(line + "\n")
fw.close()
trainnpyname = "data/fewrel/CFRLdatatest_10_100_10_"+str(filenum)+"/train_" + str(i) + ".npy"
saveasnpytrain = []
for l in range(0, len(tousetraindata)):
items = tousetraindata[l].split("\t")
relation_ix = int(items[0])
candidate_ixs = [int(ix) for ix in items[1].split()]
question = items[2].split('\n')[0]
saveasnpytrain.append([relation_ix, candidate_ixs, question])
# print(saveasnpytrain[0])
tosavetrain = transtonpy(saveasnpytrain, tokenizer)
np.save(trainnpyname, tosavetrain)
testnpyname = "data/fewrel/CFRLdatatest_10_100_10_"+str(filenum)+"/test_" + str(i) + ".npy"
saveasnpytest = []
for l in range(0, len(tousetestdata)):
items = tousetestdata[l].split("\t")
relation_ix = int(items[0])
candidate_ixs = [int(ix) for ix in items[1].split()]
question = items[2].split('\n')[0]
saveasnpytest.append([relation_ix, candidate_ixs, question])
tosavetest = transtonpy(saveasnpytest, tokenizer)
np.save(testnpyname, tosavetest)
newtrain1 = np.load("data/fewrel/CFRLdatatest_10_100_10_"+str(filenum)+"/train_0.npy",allow_pickle=True)
print(newtrain1.shape)
print(newtrain1[0])
def getnegfrombatch(oneindex,firstent,firstentindex,secondent,secondentindex,sentences,lengths,getnegfromnum,allnum,labels,neg_labels):
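    # Build hard negatives for batch element `oneindex` by splicing the head
    # or tail entity tokens of the next `getnegfromnum` batch elements into
    # its sentence; returns two negatives per donor (head swap and tail swap)
    # together with the adjusted sequence lengths.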
# thislabel = labels[oneindex]
###get information
thissentence = sentences[oneindex].numpy().tolist()
#print(thissentence)
thislength = lengths[oneindex]
#print(thislength)
thisfirstent = firstent[oneindex]
#print(thisfirstent)
thisfirstentindex = firstentindex[oneindex].numpy().tolist()
#print(thisfirstentindex)
headstart = thisfirstentindex[0]
#print(headstart)
headend = thisfirstentindex[-1]
#print(headend)
posheadlength = len(thisfirstentindex)
#print(posheadlength)
thissecondent = secondent[oneindex]
#print(thissecondent)
thissecondentindex = secondentindex[oneindex].numpy().tolist()
#print(thissecondentindex)
tailstart = thissecondentindex[0]
#print(tailstart)
tailend = thissecondentindex[-1]
#print(tailend)
postaillength = len(thissecondentindex)
#print(postaillength)
negres = []
lenres = []
for j in range(getnegfromnum):
touseindex = (oneindex + j + 1) % allnum
negusehead = firstent[touseindex].numpy().tolist()
negheadlength = len(negusehead)
negusetail = secondent[touseindex].numpy().tolist()
negtaillength = len(negusetail)
negsamplechangehead = thissentence[0:headstart] + negusehead + thissentence[headend + 1:]
changeheadlength = thislength - posheadlength + negheadlength
negsamplechangetail = thissentence[0:tailstart] + negusetail + thissentence[tailend + 1:]
changetaillength = thislength - postaillength + negtaillength
#######get 2
negres.append(negsamplechangehead)
lenres.append(changeheadlength)
negres.append(negsamplechangetail)
lenres.append(changetaillength)
######get 1
return np.asarray(negres),np.asarray(lenres)
def getnegfrombatchnew(oneindex,firstent,firstentindex,secondent,secondentindex,sentences,lengths,getnegfromnum,allnum,labels,neg_labels):
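    # Same entity-swap negatives as getnegfrombatch, but only one negative per
    # donor: the head or the tail swap, chosen uniformly at random.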
# thislabel = labels[oneindex]
###get information
thissentence = sentences[oneindex].numpy().tolist()
#print(thissentence)
thislength = lengths[oneindex]
#print(thislength)
thisfirstent = firstent[oneindex]
#print(thisfirstent)
thisfirstentindex = firstentindex[oneindex].numpy().tolist()
#print(thisfirstentindex)
headstart = thisfirstentindex[0]
#print(headstart)
headend = thisfirstentindex[-1]
#print(headend)
posheadlength = len(thisfirstentindex)
#print(posheadlength)
thissecondent = secondent[oneindex]
#print(thissecondent)
thissecondentindex = secondentindex[oneindex].numpy().tolist()
#print(thissecondentindex)
tailstart = thissecondentindex[0]
#print(tailstart)
tailend = thissecondentindex[-1]
#print(tailend)
postaillength = len(thissecondentindex)
#print(postaillength)
negres = []
lenres = []
for j in range(getnegfromnum):
touseindex = (oneindex + j + 1) % allnum
negusehead = firstent[touseindex].numpy().tolist()
negheadlength = len(negusehead)
negusetail = secondent[touseindex].numpy().tolist()
negtaillength = len(negusetail)
negsamplechangehead = thissentence[0:headstart] + negusehead + thissentence[headend + 1:]
changeheadlength = thislength - posheadlength + negheadlength
negsamplechangetail = thissentence[0:tailstart] + negusetail + thissentence[tailend + 1:]
changetaillength = thislength - postaillength + negtaillength
#######get 1
aa = random.randint(0,1)
if aa == 1:
negres.append(negsamplechangehead)
lenres.append(changeheadlength)
else:
negres.append(negsamplechangetail)
lenres.append(changetaillength)
return np.asarray(negres),np.asarray(lenres)
def getnegfrombatch_bert(oneindex,firstent,firstentindex,secondent,secondentindex,sentences,lengths,getnegfromnum,allnum,labels,neg_labels,config):
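    # BERT variant of the entity-swap negatives: every negative sequence is
    # padded or truncated to config["max_length"] and returned together with
    # its attention mask.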
thissentence = sentences[oneindex].cpu().numpy().tolist()
thislength = lengths[oneindex]
thisfirstent = firstent[oneindex]
thisfirstentindex = firstentindex[oneindex].numpy().tolist()
headstart = thisfirstentindex[0]
headend = thisfirstentindex[-1]
posheadlength = len(thisfirstentindex)
thissecondent = secondent[oneindex]
thissecondentindex = secondentindex[oneindex].numpy().tolist()
tailstart = thissecondentindex[0]
tailend = thissecondentindex[-1]
postaillength = len(thissecondentindex)
negres = []
maskres = []
for j in range(getnegfromnum):
touseindex = (oneindex + j + 1) % allnum
negusehead = firstent[touseindex].numpy().tolist()
negheadlength = len(negusehead)
negusetail = secondent[touseindex].numpy().tolist()
negtaillength = len(negusetail)
negsamplechangehead = thissentence[0:headstart] + negusehead + thissentence[headend + 1:]
changeheadlength = thislength - posheadlength + negheadlength
if len(negsamplechangehead) > config["max_length"]:
negsamplechangehead = negsamplechangehead[0:config["max_length"]]
for i in range(len(negsamplechangehead), config["max_length"]):
negsamplechangehead.append(0)
mask1 = []
for i in range(0, changeheadlength):
mask1.append(1)
for i in range(changeheadlength, config["max_length"]):
mask1.append(0)
if len(mask1) > config["max_length"]:
mask1 = mask1[0:config["max_length"]]
negsamplechangetail = thissentence[0:tailstart] + negusetail + thissentence[tailend + 1:]
changetaillength = thislength - postaillength + negtaillength
if len(negsamplechangetail) > config["max_length"]:
negsamplechangetail = negsamplechangetail[0:config["max_length"]]
for i in range(len(negsamplechangetail), config["max_length"]):
negsamplechangetail.append(0)
mask2 = []
for i in range(0, changetaillength):
mask2.append(1)
for i in range(changetaillength, config["max_length"]):
mask2.append(0)
if len(mask2) > config["max_length"]:
mask2 = mask2[0:config["max_length"]]
if len(mask1) != len(mask2):
print(len(mask1))
print(len(mask2))
print(mask1)
print(mask2)
negres.append(negsamplechangehead)
maskres.append(mask1)
negres.append(negsamplechangetail)
maskres.append(mask2)
return np.asarray(negres),np.asarray(maskres)
def getnegforonerel(mem_set,key,neg_mem_data):
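    # Build two negatives for a memorized relation: take its stored positive
    # sample (mem_set[key]['0'][0]) and replace the head or the tail with the
    # entity tokens kept under mem_set[key]['1'], appending both negatives to
    # neg_mem_data.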
negusehead = mem_set[key]['1']['h'][0]
negheadlength = len(negusehead)
negusetail = mem_set[key]['1']['t'][0]
negtaillength = len(negusetail)
possen = mem_set[key]['0'][0][2] ####positive sentence tokens
poslen = mem_set[key]['0'][0][7]
poshead = mem_set[key]['0'][0][3]
posheadindex = mem_set[key]['0'][0][4]
headstart = posheadindex[0]
headend = posheadindex[-1]
posheadlength = len(posheadindex)
postail = mem_set[key]['0'][0][5]
postailindex = mem_set[key]['0'][0][6]
tailstart = postailindex[0]
tailend = postailindex[-1]
postaillength = len(postailindex)
negsamplechangehead = possen[0:headstart] + negusehead + possen[headend + 1:]
changeheadlength = poslen - posheadlength + negheadlength
negsamplechangetail = possen[0:tailstart] + negusetail + possen[tailend + 1:]
changetaillength = poslen - postaillength + negtaillength
newnegsample1 = []
newnegsample1.append(mem_set[key]['0'][0][0])
newnegsample1.append(mem_set[key]['0'][0][1])
newnegsample1.append(negsamplechangehead)
newnegsample1.append(negusehead)
newnegsample1.append(posheadindex) ####wrong index
newnegsample1.append(postail)
newnegsample1.append(postailindex)
newnegsample1.append("neghead")
newnegsample1.append("postail")
newnegsample1.append("fakesen")
newnegsample1.append(changeheadlength)
newnegsample1.append(2)
newnegsample2 = []
newnegsample2.append(mem_set[key]['0'][0][0])
newnegsample2.append(mem_set[key]['0'][0][1])
newnegsample2.append(negsamplechangetail)
newnegsample2.append(poshead)
newnegsample2.append(posheadindex)
newnegsample2.append(negusetail)
newnegsample2.append(postailindex)
newnegsample1.append("poshead")
newnegsample1.append("negtail")
newnegsample1.append("fakesen")
newnegsample2.append(changetaillength)
newnegsample2.append(2)
# print(newnegsample2)
neg_mem_data.append(np.asarray(newnegsample1))
neg_mem_data.append(np.asarray(newnegsample2))
def getposandneg(logits,logits_proto,labels,typelabels):
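    # Split a batch of encoder logits and prototype logits into a positive
    # group (typelabel == 1) and a negative group, keeping the labels aligned
    # with each group.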
numofpos = 0
numofneg = 0
for index, logit in enumerate(logits):
type = typelabels[index]
if type == 1:
numofpos += 1
else:
numofneg += 1
embedlen = logits.shape[1]
tensorpos = torch.zeros((numofpos, embedlen))
protopos = torch.zeros((numofpos, embedlen))
poslabels = torch.zeros([numofpos],dtype=torch.long)
tensorneg = torch.zeros((numofneg, embedlen))
protoneg = torch.zeros((numofneg, embedlen))
neglabels = torch.zeros([numofneg],dtype=torch.long)
posindex = 0
negindex = 0
for index, logit in enumerate(logits):
type = typelabels[index]
if type == 1:
tensorpos[posindex] = logits[index]
protopos[posindex] = logits_proto[index]
poslabels[posindex] = labels[index]
posindex += 1
else:
tensorneg[negindex] = logits[index]
protoneg[negindex] = logits_proto[index]
neglabels[negindex] = labels[index]
negindex += 1
#numofpos
#numofneg
#print("numofpos:\t",numofpos,"numofneg:\t",numofneg)
return tensorpos,protopos,poslabels,tensorneg,protoneg,neglabels,numofneg
def handletoken(raw_text,h_pos_li,t_pos_li,tokenizer):
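    # Lowercase the raw text, wrap the head span in [unused0] ... [unused1]
    # and the tail span in [unused2] ... [unused3], and return the
    # [CLS]/[SEP]-delimited BERT input ids plus the positions of the two
    # opening markers.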
h_pattern = re.compile("\* h \*")
t_pattern = re.compile("\^ t \^")
err = 0
tokens = []
h_mention = []
t_mention = []
raw_text_list = raw_text.split(" ")
for i, token in enumerate(raw_text_list):
token = token.lower()
if i >= h_pos_li[0] and i <= h_pos_li[-1]:
if i == h_pos_li[0]:
tokens += ['*', 'h', '*']
h_mention.append(token)
continue
if i >= t_pos_li[0] and i <= t_pos_li[-1]:
if i == t_pos_li[0]:
tokens += ['^', 't', '^']
t_mention.append(token)
continue
tokens.append(token)
text = " ".join(tokens)
h_mention = " ".join(h_mention)
t_mention = " ".join(t_mention)
#print(text)
#print(h_mention)
#print(t_mention)
tokenized_text = tokenizer.tokenize(text)
tokenized_head = tokenizer.tokenize(h_mention)
tokenized_tail = tokenizer.tokenize(t_mention)
p_text = " ".join(tokenized_text)
p_head = " ".join(tokenized_head)
p_tail = " ".join(tokenized_tail)
p_text = h_pattern.sub("[unused0] " + p_head + " [unused1]", p_text)
p_text = t_pattern.sub("[unused2] " + p_tail + " [unused3]", p_text)
#print(p_text)
f_text = ("[CLS] " + p_text + " [SEP]").split()
#print(f_text)
# If h_pos_li and t_pos_li overlap, we can't find head entity or tail entity.
try:
h_pos = f_text.index("[unused0]")
except:
err += 1
h_pos = 0
try:
t_pos = f_text.index("[unused2]")
except:
err += 1
t_pos = 0
tokenized_input = tokenizer.convert_tokens_to_ids(f_text)
return tokenized_input, h_pos, t_pos
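# Usage sketch (hypothetical sentence and spans):
#   ids, h, t = handletoken("bill gates founded microsoft", [0, 1], [3], tokenizer)
# `ids` is the marked id sequence; `h` and `t` index the [unused0] and
# [unused2] markers inside it (both fall back to 0 when the spans overlap).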
def filter_sentence(sentence):
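    # Return True (i.e. drop the sentence) when the head and tail mentions are
    # identical or their token spans overlap.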
head_pos = sentence["h"]["pos"][0]
tail_pos = sentence["t"]["pos"][0]
if sentence["h"]["name"] == sentence["t"]["name"]: # head mention equals tail mention
return True
    if head_pos[0] >= tail_pos[0] and head_pos[0] <= tail_pos[-1]: # head mention and tail mention overlap
        return True
    if tail_pos[0] >= head_pos[0] and tail_pos[0] <= head_pos[-1]: # head mention and tail mention overlap
        return True
return False
def process_data(file1,file2):
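    # Index the distant-supervision corpus by "headid#tailid" entity pair,
    # keeping at most `max_num` sentences per pair; returns the flat sentence
    # list and each pair's [start, end) scope into it, e.g. (hypothetical ids)
    # entpair2scope["Q123#Q456"] == [0, 16] means list_data[0:16] share that pair.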
data1 = json.load(open(file1))
#data2 = json.load(open(file2))
data2 = {}
max_num = 16 ###max number for every entity pair
ent_data = defaultdict(list)
for key in data1.keys():
for sentence in data1[key]:
if filter_sentence(sentence):
continue
head = sentence["h"]["id"]
tail = sentence["t"]["id"]
newsen = sentence
#print(newsen["tokens"])
newtokens = " ".join(newsen["tokens"]).lower().split(" ")
#print(newtokens)
newsen["tokens"] = newtokens
#print(newsen)
ent_data[head + "#" + tail].append(newsen)
for key in data2.keys():
for sentence in data2[key]:
if filter_sentence(sentence):
continue
head = sentence["h"]["id"]
tail = sentence["t"]["id"]
newsen = sentence
newtokens = " ".join(newsen["tokens"]).lower().split(" ")
newsen["tokens"] = newtokens
ent_data[head + "#" + tail].append(newsen)
ll = 0
list_data = []
entpair2scope = {}
for key in ent_data.keys():
#if len(ent_data[key]) < 2:
# continue
list_data.extend(ent_data[key][0:max_num])
entpair2scope[key] = [ll, len(list_data)]
ll = len(list_data)
return list_data,entpair2scope
def select_similar_data_new(training_data,tokenizer,entpair2scope,topk,max_sen_length_for_select,list_data,config,SimModel,select_thredsold,max_sen_lstm_tokenize,enctokenizer,faissindex,ifnorm,select_num=2):
#use both methods
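    # For every training sample: if its entity pair occurs in the corpus,
    # score all of that pair's sentences with SimModel (concatenated head/tail
    # hidden states) and keep up to `topk` above `select_thredsold`; if the
    # pair is unseen (or nothing passed the threshold), fall back to a faiss
    # nearest-neighbour search over the whole corpus and take `select_num`
    # hits. Selected sentences are re-tokenized and collected as extra
    # positive samples (typelabel 1).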
selectdata = []
alladdnum = 0
#md5 = hashlib.md5()
has = 0
nothas = 0
for onedata in training_data:
label = onedata[0]
text = onedata[9]
headid = onedata[7]
tailid = onedata[8]
headindex = onedata[4]
tailindex = onedata[6]
onedatatoken, onedatahead, onedatatail = handletoken(text, headindex, tailindex, tokenizer)
onedicid = headid + "#" + tailid
tmpselectnum = 0
if onedicid in entpair2scope:
#print("bbbbbbbbbbbbbbb")
has += 1
thispairnum = entpair2scope[onedicid][1] - entpair2scope[onedicid][0]
#if thispairnum > topk:
if True:
###choose topk
alldisforthispair = []
input_ids = np.zeros((thispairnum + 1, max_sen_length_for_select), dtype=int)
mask = np.zeros((thispairnum + 1, max_sen_length_for_select), dtype=int)
h_pos = np.zeros((thispairnum + 1), dtype=int)
t_pos = np.zeros((thispairnum + 1), dtype=int)
for index in range(entpair2scope[onedicid][0], entpair2scope[onedicid][1]):
oneres = list_data[index]
tokens = " ".join(oneres["tokens"])
###sentence["tokens"], [h_p[0], h_p[-1]+1], [t_p[0], t_p[-1]+1]
'''
sentence example:
{
'tokens': ['Microsoft', 'was', 'founded', 'by', 'Bill', 'Gates', '.']
'h': {'pos':[[0]], 'name': 'Microsoft', 'id': Q123456},
't': {'pos':[[4,5]], 'name': 'Bill Gates', 'id': Q2333},
'r': 'P1'
}
'''
hposstart = oneres["h"]["pos"][0][0]
hposend = oneres["h"]["pos"][0][-1]
tposstart = oneres["t"]["pos"][0][0]
tposend = oneres["t"]["pos"][0][-1]
tokenres, headpos, tailpos = handletoken(tokens, [hposstart, hposend], [tposstart, tposend],
tokenizer)
length = min(len(tokenres), max_sen_length_for_select)
input_ids[index - entpair2scope[onedicid][0]][0:length] = tokenres[0:length]
mask[index - entpair2scope[onedicid][0]][0:length] = 1
h_pos[index - entpair2scope[onedicid][0]] = min(headpos, max_sen_length_for_select - 1)
t_pos[index - entpair2scope[onedicid][0]] = min(tailpos, max_sen_length_for_select - 1)
# onedatatoken, onedatahead, onedatatail
length = min(len(onedatatoken), max_sen_length_for_select)
input_ids[thispairnum][0:length] = onedatatoken[0:length]
mask[thispairnum][0:length] = 1
h_pos[thispairnum] = min(onedatahead, max_sen_length_for_select - 1)
t_pos[thispairnum] = min(onedatatail, max_sen_length_for_select - 1)
###cal score
# print(input_ids)
# print(mask)
input_ids = torch.from_numpy(input_ids).to(config["device"])
mask = torch.from_numpy(mask).to(config["device"])
h_pos = torch.from_numpy(h_pos).to(config["device"])
t_pos = torch.from_numpy(t_pos).to(config["device"])
outputs = SimModel(input_ids, mask)
indice = torch.arange(input_ids.size()[0])
h_state = outputs[0][indice, h_pos]
t_state = outputs[0][indice, t_pos]
state = torch.cat((h_state, t_state), 1)
# print(state.shape)
query = state[thispairnum, :].view(1, state.shape[-1])
toselect = state[0:thispairnum, :].view(thispairnum, state.shape[-1])
if ifnorm:
#print("norm")
querynorm = query / query.norm(dim=1)[:, None]
toselectnorm = toselect / toselect.norm(dim=1)[:, None]
res = (querynorm * toselectnorm).sum(-1)
#print(res)
else:
res = (query * toselect).sum(-1)
# print(res)
pred = []
for i in range(res.size(0)):
pred.append((res[i], i))
pred.sort(key=lambda x: x[0], reverse=True)
# print(pred)
# print(res.shape)
# print(res)
####select from pred
selectedindex = []
tmpselectnum = 0
                prescore = -100.0
for k in range(len(pred)):
thistext = " ".join(list_data[entpair2scope[onedicid][0] + pred[k][1]]["tokens"])
if thistext == text:
continue
#if tmpselectnum < topk and pred[k][0] > select_thredsold and pred[k][0] != prescore:
if tmpselectnum < topk and pred[k][0] > select_thredsold:
selectedindex.append(pred[k][1])
prescore = pred[k][0]
tmpselectnum += 1
#print("tmpselectnum: ",tmpselectnum)
for onenum in selectedindex:
onelabel = label
oneneg = [label]
onesen = " ".join(list_data[entpair2scope[onedicid][0] + onenum]["tokens"])
tokens = enctokenizer.tokenize(onesen)
length = min(len(tokens), max_sen_lstm_tokenize)
tokens = enctokenizer.convert_tokens_to_ids(tokens, unk_id=enctokenizer.vocab['[UNK]'])
if (len(tokens) > max_sen_lstm_tokenize):
tokens = tokens[:max_sen_lstm_tokenize]
fakefirstent = [554, 555]
fakefirstindex = [0, 1]
fakesecondent = [665, 666]
fakesecondindex = [3, 4]
fakeheadid = "fheadid"
faketailid = "ftailid"
fakerawtext = "fakefake"
typelabel = 1 ###positive sample
oneseldata = [onelabel, oneneg, tokens, fakefirstent, fakefirstindex, fakesecondent, fakesecondindex, fakeheadid, faketailid, fakerawtext, length, typelabel]
selectdata.append(np.asarray(oneseldata))
#selectres.append(list_data[entpair2scope[onedicid][0] + onenum])
alladdnum += tmpselectnum
if onedicid not in entpair2scope or tmpselectnum == 0:
#print("aaaaaaaaaa")
nothas += 1
# print("not in! use fasis")
topuse = select_num
# faissindex
input_ids = np.zeros((1, max_sen_length_for_select), dtype=int)
mask = np.zeros((1, max_sen_length_for_select), dtype=int)
h_pos = np.zeros((1), dtype=int)
t_pos = np.zeros((1), dtype=int)
length = min(len(onedatatoken), max_sen_length_for_select)
input_ids[0][0:length] = onedatatoken[0:length]
mask[0][0:length] = 1
h_pos[0] = min(onedatahead, max_sen_length_for_select - 1)
t_pos[0] = min(onedatatail, max_sen_length_for_select - 1)
input_ids = torch.from_numpy(input_ids).to(config["device"])
mask = torch.from_numpy(mask).to(config["device"])
h_pos = torch.from_numpy(h_pos).to(config["device"])
t_pos = torch.from_numpy(t_pos).to(config["device"])
outputs = SimModel(input_ids, mask)
indice = torch.arange(input_ids.size()[0])
h_state = outputs[0][indice, h_pos]
t_state = outputs[0][indice, t_pos]
state = torch.cat((h_state, t_state), 1)
# print(state.shape)
            # Normalize the query so the inner-product FAISS search behaves like cosine similarity.
            if ifnorm:
                state = state / state.norm(dim=1)[:, None]
query = state.view(1, state.shape[-1]).cpu().detach().numpy()
D, I = faissindex.search(query, topuse)
newtouse = topuse
newadd = 0
for i in range(newtouse):
thisdis = D[0][i]
#print("&&&&&&&&&&&&&&&&&&")
#print(thisdis)
###whether to use this?
#if thisdis < 0.95:
# continue
newadd += 1
onenum = I[0][i]
onelabel = label
oneneg = [label]
onesen = " ".join(list_data[onenum]["tokens"])
                ### normalize whitespace; str.replace returns a new string, so reassign
                onesen = onesen.replace("\n", " ")
tokens = enctokenizer.tokenize(onesen)
length = min(len(tokens), max_sen_lstm_tokenize)
tokens = enctokenizer.convert_tokens_to_ids(tokens, unk_id=enctokenizer.vocab['[UNK]'])
if (len(tokens) > max_sen_lstm_tokenize):
tokens = tokens[:max_sen_lstm_tokenize]
fakefirstent = [554, 555]
fakefirstindex = [0, 1]
fakesecondent = [665, 666]
fakesecondindex = [3, 4]
fakeheadid = "fheadid"
faketailid = "ftailid"
fakerawtext = "fakefake"
typelabel = 1 ###positive sample
oneseldata = [onelabel, oneneg, tokens, fakefirstent, fakefirstindex, fakesecondent, fakesecondindex,
fakeheadid, faketailid, fakerawtext, length, typelabel]
selectdata.append(np.asarray(oneseldata))
alladdnum += newadd
return selectdata
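
# Minimal usage sketch (illustrative -- `train`, `tok`, `scope`, `sents`, `cfg`,
# `sim_model`, `enc_tok` and `index` are placeholders for objects built elsewhere):
#
#   extra = select_similar_data_new(train, tok, scope, topk=5,
#                                   max_sen_length_for_select=128, list_data=sents,
#                                   config=cfg, SimModel=sim_model, select_thredsold=0.9,
#                                   max_sen_lstm_tokenize=100, enctokenizer=enc_tok,
#                                   faissindex=index, ifnorm=True, select_num=2)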
def select_similar_data_new_bert(training_data,tokenizer,entpair2scope,topk,max_sen_length_for_select,list_data,config,SimModel,select_thredsold,max_sen_lstm_tokenize,enctokenizer,faissindex,ifnorm,select_num=2):
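    # Variant of select_similar_data_new: selected sentences are re-tokenised with the
    # scoring `tokenizer` via handletoken (instead of `enctokenizer`) and each sample
    # additionally carries an explicit 0/1 attention mask.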
selectdata = []
alladdnum = 0
has = 0
nothas = 0
for onedata in training_data:
label = onedata[0]
text = onedata[9]
headid = onedata[7]
tailid = onedata[8]
headindex = onedata[4]
tailindex = onedata[6]
onedatatoken, onedatahead, onedatatail = handletoken(text, headindex, tailindex, tokenizer)
onedicid = headid + "#" + tailid
tmpselectnum = 0
if onedicid in entpair2scope:
#print("bbbbbbbbbbbbbbb")
has += 1
thispairnum = entpair2scope[onedicid][1] - entpair2scope[onedicid][0]
#if thispairnum > topk:
if True:
###choose topk
alldisforthispair = []
input_ids = np.zeros((thispairnum + 1, max_sen_length_for_select), dtype=int)
mask = np.zeros((thispairnum + 1, max_sen_length_for_select), dtype=int)
h_pos = np.zeros((thispairnum + 1), dtype=int)
t_pos = np.zeros((thispairnum + 1), dtype=int)
for index in range(entpair2scope[onedicid][0], entpair2scope[onedicid][1]):
oneres = list_data[index]
tokens = " ".join(oneres["tokens"])
###sentence["tokens"], [h_p[0], h_p[-1]+1], [t_p[0], t_p[-1]+1]
                    '''
                    sentence example:
                    {
                        'tokens': ['Microsoft', 'was', 'founded', 'by', 'Bill', 'Gates', '.'],
                        'h': {'pos': [[0]], 'name': 'Microsoft', 'id': 'Q123456'},
                        't': {'pos': [[4, 5]], 'name': 'Bill Gates', 'id': 'Q2333'},
                        'r': 'P1'
                    }
                    '''
hposstart = oneres["h"]["pos"][0][0]
hposend = oneres["h"]["pos"][0][-1]
tposstart = oneres["t"]["pos"][0][0]
tposend = oneres["t"]["pos"][0][-1]
tokenres, headpos, tailpos = handletoken(tokens, [hposstart, hposend], [tposstart, tposend],
tokenizer)
length = min(len(tokenres), max_sen_length_for_select)
input_ids[index - entpair2scope[onedicid][0]][0:length] = tokenres[0:length]
mask[index - entpair2scope[onedicid][0]][0:length] = 1
h_pos[index - entpair2scope[onedicid][0]] = min(headpos, max_sen_length_for_select - 1)
t_pos[index - entpair2scope[onedicid][0]] = min(tailpos, max_sen_length_for_select - 1)
# onedatatoken, onedatahead, onedatatail
length = min(len(onedatatoken), max_sen_length_for_select)
input_ids[thispairnum][0:length] = onedatatoken[0:length]
mask[thispairnum][0:length] = 1
h_pos[thispairnum] = min(onedatahead, max_sen_length_for_select - 1)
t_pos[thispairnum] = min(onedatatail, max_sen_length_for_select - 1)
###cal score
# print(input_ids)
# print(mask)
input_ids = torch.from_numpy(input_ids).to(config["device"])
mask = torch.from_numpy(mask).to(config["device"])
h_pos = torch.from_numpy(h_pos).to(config["device"])
t_pos = torch.from_numpy(t_pos).to(config["device"])
outputs = SimModel(input_ids, mask)
indice = torch.arange(input_ids.size()[0])
h_state = outputs[0][indice, h_pos]
t_state = outputs[0][indice, t_pos]
state = torch.cat((h_state, t_state), 1)
# print(state.shape)
query = state[thispairnum, :].view(1, state.shape[-1])
toselect = state[0:thispairnum, :].view(thispairnum, state.shape[-1])
if ifnorm:
#print("norm")
querynorm = query / query.norm(dim=1)[:, None]
toselectnorm = toselect / toselect.norm(dim=1)[:, None]
res = (querynorm * toselectnorm).sum(-1)
#print(res)
else:
res = (query * toselect).sum(-1)
# print(res)
pred = []
for i in range(res.size(0)):
pred.append((res[i], i))
pred.sort(key=lambda x: x[0], reverse=True)
# print(pred)
# print(res.shape)
# print(res)
####select from pred
selectedindex = []
tmpselectnum = 0
prescore= -100.0
for k in range(len(pred)):
thistext = " ".join(list_data[entpair2scope[onedicid][0] + pred[k][1]]["tokens"])
if thistext == text:
continue
#if tmpselectnum < topk and pred[k][0] > select_thredsold and pred[k][0] != prescore:
if tmpselectnum < topk and pred[k][0] > select_thredsold:
selectedindex.append(pred[k][1])
prescore = pred[k][0]
tmpselectnum += 1
#print("tmpselectnum: ",tmpselectnum)
for onenum in selectedindex:
oneres = list_data[entpair2scope[onedicid][0] + onenum]
onelabel = label
oneneg = [label]
onesen = " ".join(oneres["tokens"])
hposstart = oneres["h"]["pos"][0][0]
hposend = oneres["h"]["pos"][0][-1]
tposstart = oneres["t"]["pos"][0][0]
tposend = oneres["t"]["pos"][0][-1]
tokens, headpos, tailpos = handletoken(onesen, [hposstart, hposend], [tposstart, tposend],
tokenizer)
length = min(len(tokens), max_sen_lstm_tokenize)
if (len(tokens) > max_sen_lstm_tokenize):
tokens = tokens[:max_sen_lstm_tokenize]
newtokens = []
for i in range(0, length):
newtokens.append(tokens[i])
for i in range(length, max_sen_lstm_tokenize):
newtokens.append(0)
fakefirstent = [554, 555]
fakefirstindex = [0, 1]
fakesecondent = [665, 666]
fakesecondindex = [3, 4]
fakeheadid = "fheadid"
faketailid = "ftailid"
fakerawtext = "fakefake"
typelabel = 1 ###positive sample
mask = []
for i in range(0, length):
mask.append(1)
for i in range(length, max_sen_lstm_tokenize):
mask.append(0)
oneseldata = [onelabel, oneneg, newtokens, fakefirstent, fakefirstindex, fakesecondent, fakesecondindex, fakeheadid, faketailid, fakerawtext, length, typelabel, mask]
selectdata.append(np.asarray(oneseldata))
#selectres.append(list_data[entpair2scope[onedicid][0] + onenum])
alladdnum += tmpselectnum
if onedicid not in entpair2scope or tmpselectnum == 0:
#print("aaaaaaaaaa")
nothas += 1
# print("not in! use fasis")
topuse = select_num
# faissindex
input_ids = np.zeros((1, max_sen_length_for_select), dtype=int)
mask = np.zeros((1, max_sen_length_for_select), dtype=int)
h_pos = np.zeros((1), dtype=int)
t_pos = np.zeros((1), dtype=int)
length = min(len(onedatatoken), max_sen_length_for_select)
input_ids[0][0:length] = onedatatoken[0:length]
mask[0][0:length] = 1
h_pos[0] = min(onedatahead, max_sen_length_for_select - 1)
t_pos[0] = min(onedatatail, max_sen_length_for_select - 1)
input_ids = torch.from_numpy(input_ids).to(config["device"])
mask = torch.from_numpy(mask).to(config["device"])
h_pos = torch.from_numpy(h_pos).to(config["device"])
t_pos = torch.from_numpy(t_pos).to(config["device"])
outputs = SimModel(input_ids, mask)
indice = torch.arange(input_ids.size()[0])
h_state = outputs[0][indice, h_pos]
t_state = outputs[0][indice, t_pos]
state = torch.cat((h_state, t_state), 1)
# print(state.shape)
            # Normalize the query so the inner-product FAISS search behaves like cosine similarity.
            if ifnorm:
                state = state / state.norm(dim=1)[:, None]
query = state.view(1, state.shape[-1]).cpu().detach().numpy()
D, I = faissindex.search(query, topuse)
newtouse = topuse
newadd = 0
for i in range(newtouse):
thisdis = D[0][i]
#print("&&&&&&&&&&&&&&&&&&")
#print(thisdis)
###whether to use this?
#if thisdis < 0.95:
# continue
newadd += 1
onenum = I[0][i]
onelabel = label
oneneg = [label]
oneres = list_data[onenum]
onesen = " ".join(oneres["tokens"])
hposstart = oneres["h"]["pos"][0][0]
hposend = oneres["h"]["pos"][0][-1]
tposstart = oneres["t"]["pos"][0][0]
tposend = oneres["t"]["pos"][0][-1]
tokens, headpos, tailpos = handletoken(onesen, [hposstart, hposend], [tposstart, tposend],
tokenizer)
length = min(len(tokens), max_sen_lstm_tokenize)
if (len(tokens) > max_sen_lstm_tokenize):
tokens = tokens[:max_sen_lstm_tokenize]
newtokens = []
for i in range(0, length):
newtokens.append(tokens[i])
for i in range(length, max_sen_lstm_tokenize):
newtokens.append(0)
fakefirstent = [554, 555]
fakefirstindex = [0, 1]
fakesecondent = [665, 666]
fakesecondindex = [3, 4]
fakeheadid = "fheadid"
faketailid = "ftailid"
fakerawtext = "fakefake"
typelabel = 1 ###positive sample
mask = []
for i in range(0, length):
mask.append(1)
for i in range(length, max_sen_lstm_tokenize):
mask.append(0)
oneseldata = [onelabel, oneneg, newtokens, fakefirstent, fakefirstindex, fakesecondent, fakesecondindex,
fakeheadid, faketailid, fakerawtext, length, typelabel,mask]
selectdata.append(np.asarray(oneseldata))
alladdnum += newadd
return selectdata
def select_similar_data_new_tac(training_data,tokenizer,entpair2scope,topk,max_sen_length_for_select,list_data,config,SimModel,select_thredsold,max_sen_lstm_tokenize,enctokenizer,faissindex,ifnorm,select_num=2):
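    # TAC variant of select_similar_data_new: identical except the FAISS fallback
    # drops neighbours whose similarity score is below 0.96.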
selectdata = []
alladdnum = 0
has = 0
nothas = 0
for onedata in training_data:
label = onedata[0]
text = onedata[9]
headid = onedata[7]
tailid = onedata[8]
headindex = onedata[4]
tailindex = onedata[6]
onedatatoken, onedatahead, onedatatail = handletoken(text, headindex, tailindex, tokenizer)
onedicid = headid + "#" + tailid
tmpselectnum = 0
if onedicid in entpair2scope:
#print("bbbbbbbbbbbbbbb")
has += 1
thispairnum = entpair2scope[onedicid][1] - entpair2scope[onedicid][0]
#if thispairnum > topk:
if True:
###choose topk
alldisforthispair = []
input_ids = np.zeros((thispairnum + 1, max_sen_length_for_select), dtype=int)
mask = np.zeros((thispairnum + 1, max_sen_length_for_select), dtype=int)
h_pos = np.zeros((thispairnum + 1), dtype=int)
t_pos = np.zeros((thispairnum + 1), dtype=int)
for index in range(entpair2scope[onedicid][0], entpair2scope[onedicid][1]):
oneres = list_data[index]
tokens = " ".join(oneres["tokens"])
###sentence["tokens"], [h_p[0], h_p[-1]+1], [t_p[0], t_p[-1]+1]
                    '''
                    sentence example:
                    {
                        'tokens': ['Microsoft', 'was', 'founded', 'by', 'Bill', 'Gates', '.'],
                        'h': {'pos': [[0]], 'name': 'Microsoft', 'id': 'Q123456'},
                        't': {'pos': [[4, 5]], 'name': 'Bill Gates', 'id': 'Q2333'},
                        'r': 'P1'
                    }
                    '''
hposstart = oneres["h"]["pos"][0][0]
hposend = oneres["h"]["pos"][0][-1]
tposstart = oneres["t"]["pos"][0][0]
tposend = oneres["t"]["pos"][0][-1]
tokenres, headpos, tailpos = handletoken(tokens, [hposstart, hposend], [tposstart, tposend],
tokenizer)
length = min(len(tokenres), max_sen_length_for_select)
input_ids[index - entpair2scope[onedicid][0]][0:length] = tokenres[0:length]
mask[index - entpair2scope[onedicid][0]][0:length] = 1
h_pos[index - entpair2scope[onedicid][0]] = min(headpos, max_sen_length_for_select - 1)
t_pos[index - entpair2scope[onedicid][0]] = min(tailpos, max_sen_length_for_select - 1)
# onedatatoken, onedatahead, onedatatail
length = min(len(onedatatoken), max_sen_length_for_select)
input_ids[thispairnum][0:length] = onedatatoken[0:length]
mask[thispairnum][0:length] = 1
h_pos[thispairnum] = min(onedatahead, max_sen_length_for_select - 1)
t_pos[thispairnum] = min(onedatatail, max_sen_length_for_select - 1)
###cal score
# print(input_ids)
# print(mask)
input_ids = torch.from_numpy(input_ids).to(config["device"])
mask = torch.from_numpy(mask).to(config["device"])
h_pos = torch.from_numpy(h_pos).to(config["device"])
t_pos = torch.from_numpy(t_pos).to(config["device"])
outputs = SimModel(input_ids, mask)
indice = torch.arange(input_ids.size()[0])
h_state = outputs[0][indice, h_pos]
t_state = outputs[0][indice, t_pos]
state = torch.cat((h_state, t_state), 1)
# print(state.shape)
query = state[thispairnum, :].view(1, state.shape[-1])
toselect = state[0:thispairnum, :].view(thispairnum, state.shape[-1])
if ifnorm:
#print("norm")
querynorm = query / query.norm(dim=1)[:, None]
toselectnorm = toselect / toselect.norm(dim=1)[:, None]
res = (querynorm * toselectnorm).sum(-1)
#print(res)
else:
res = (query * toselect).sum(-1)
# print(res)
pred = []
for i in range(res.size(0)):
pred.append((res[i], i))
pred.sort(key=lambda x: x[0], reverse=True)
# print(pred)
# print(res.shape)
# print(res)
####select from pred
selectedindex = []
tmpselectnum = 0
prescore= -100.0
for k in range(len(pred)):
thistext = " ".join(list_data[entpair2scope[onedicid][0] + pred[k][1]]["tokens"])
if thistext == text:
continue
#if tmpselectnum < topk and pred[k][0] > select_thredsold and pred[k][0] != prescore:
if tmpselectnum < topk and pred[k][0] > select_thredsold:
selectedindex.append(pred[k][1])
prescore = pred[k][0]
tmpselectnum += 1
#print("tmpselectnum: ",tmpselectnum)
for onenum in selectedindex:
onelabel = label
oneneg = [label]
onesen = " ".join(list_data[entpair2scope[onedicid][0] + onenum]["tokens"])
tokens = enctokenizer.tokenize(onesen)
length = min(len(tokens), max_sen_lstm_tokenize)
tokens = enctokenizer.convert_tokens_to_ids(tokens, unk_id=enctokenizer.vocab['[UNK]'])
if (len(tokens) > max_sen_lstm_tokenize):
tokens = tokens[:max_sen_lstm_tokenize]
fakefirstent = [554, 555]
fakefirstindex = [0, 1]
fakesecondent = [665, 666]
fakesecondindex = [3, 4]
fakeheadid = "fheadid"
faketailid = "ftailid"
fakerawtext = "fakefake"
typelabel = 1 ###positive sample
oneseldata = [onelabel, oneneg, tokens, fakefirstent, fakefirstindex, fakesecondent, fakesecondindex, fakeheadid, faketailid, fakerawtext, length, typelabel]
selectdata.append(np.asarray(oneseldata))
#selectres.append(list_data[entpair2scope[onedicid][0] + onenum])
alladdnum += tmpselectnum
if onedicid not in entpair2scope or tmpselectnum == 0:
#print("aaaaaaaaaa")
nothas += 1
# print("not in! use fasis")
topuse = select_num
# faissindex
input_ids = np.zeros((1, max_sen_length_for_select), dtype=int)
mask = np.zeros((1, max_sen_length_for_select), dtype=int)
h_pos = np.zeros((1), dtype=int)
t_pos = np.zeros((1), dtype=int)
length = min(len(onedatatoken), max_sen_length_for_select)
input_ids[0][0:length] = onedatatoken[0:length]
mask[0][0:length] = 1
h_pos[0] = min(onedatahead, max_sen_length_for_select - 1)
t_pos[0] = min(onedatatail, max_sen_length_for_select - 1)
input_ids = torch.from_numpy(input_ids).to(config["device"])
mask = torch.from_numpy(mask).to(config["device"])
h_pos = torch.from_numpy(h_pos).to(config["device"])
t_pos = torch.from_numpy(t_pos).to(config["device"])
outputs = SimModel(input_ids, mask)
indice = torch.arange(input_ids.size()[0])
h_state = outputs[0][indice, h_pos]
t_state = outputs[0][indice, t_pos]
state = torch.cat((h_state, t_state), 1)
# print(state.shape)
            # Normalize the query so the inner-product FAISS search behaves like cosine similarity.
            if ifnorm:
                state = state / state.norm(dim=1)[:, None]
query = state.view(1, state.shape[-1]).cpu().detach().numpy()
D, I = faissindex.search(query, topuse)
newtouse = topuse
newadd = 0
for i in range(newtouse):
thisdis = D[0][i]
#print("&&&&&&&&&&&&&&&&&&")
#print(thisdis)
###whether to use this?
if thisdis < 0.96:
continue
newadd += 1
onenum = I[0][i]
onelabel = label
oneneg = [label]
onesen = " ".join(list_data[onenum]["tokens"])
                ### normalize whitespace; str.replace returns a new string, so reassign
                onesen = onesen.replace("\n", " ")
tokens = enctokenizer.tokenize(onesen)
length = min(len(tokens), max_sen_lstm_tokenize)
tokens = enctokenizer.convert_tokens_to_ids(tokens, unk_id=enctokenizer.vocab['[UNK]'])
if (len(tokens) > max_sen_lstm_tokenize):
tokens = tokens[:max_sen_lstm_tokenize]
fakefirstent = [554, 555]
fakefirstindex = [0, 1]
fakesecondent = [665, 666]
fakesecondindex = [3, 4]
fakeheadid = "fheadid"
faketailid = "ftailid"
fakerawtext = "fakefake"
typelabel = 1 ###positive sample
oneseldata = [onelabel, oneneg, tokens, fakefirstent, fakefirstindex, fakesecondent, fakesecondindex,
fakeheadid, faketailid, fakerawtext, length, typelabel]
selectdata.append(np.asarray(oneseldata))
alladdnum += newadd
return selectdata
def select_similar_data_new_bert_tac(training_data,tokenizer,entpair2scope,topk,max_sen_length_for_select,list_data,config,SimModel,select_thredsold,max_sen_lstm_tokenize,enctokenizer,faissindex,ifnorm,select_num=2):
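    # TAC variant of select_similar_data_new_bert: BERT-style re-tokenisation with an
    # explicit mask, plus the 0.96 similarity cut-off in the FAISS fallback.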
selectdata = []
alladdnum = 0
has = 0
nothas = 0
for onedata in training_data:
label = onedata[0]
text = onedata[9]
headid = onedata[7]
tailid = onedata[8]
headindex = onedata[4]
tailindex = onedata[6]
onedatatoken, onedatahead, onedatatail = handletoken(text, headindex, tailindex, tokenizer)
onedicid = headid + "#" + tailid
tmpselectnum = 0
if onedicid in entpair2scope:
#print("bbbbbbbbbbbbbbb")
has += 1
thispairnum = entpair2scope[onedicid][1] - entpair2scope[onedicid][0]
#if thispairnum > topk:
if True:
###choose topk
alldisforthispair = []
input_ids = np.zeros((thispairnum + 1, max_sen_length_for_select), dtype=int)
mask = np.zeros((thispairnum + 1, max_sen_length_for_select), dtype=int)
h_pos = np.zeros((thispairnum + 1), dtype=int)
t_pos = np.zeros((thispairnum + 1), dtype=int)
for index in range(entpair2scope[onedicid][0], entpair2scope[onedicid][1]):
oneres = list_data[index]
tokens = " ".join(oneres["tokens"])
###sentence["tokens"], [h_p[0], h_p[-1]+1], [t_p[0], t_p[-1]+1]
                    '''
                    sentence example:
                    {
                        'tokens': ['Microsoft', 'was', 'founded', 'by', 'Bill', 'Gates', '.'],
                        'h': {'pos': [[0]], 'name': 'Microsoft', 'id': 'Q123456'},
                        't': {'pos': [[4, 5]], 'name': 'Bill Gates', 'id': 'Q2333'},
                        'r': 'P1'
                    }
                    '''
hposstart = oneres["h"]["pos"][0][0]
hposend = oneres["h"]["pos"][0][-1]
tposstart = oneres["t"]["pos"][0][0]
tposend = oneres["t"]["pos"][0][-1]
tokenres, headpos, tailpos = handletoken(tokens, [hposstart, hposend], [tposstart, tposend],
tokenizer)
length = min(len(tokenres), max_sen_length_for_select)
input_ids[index - entpair2scope[onedicid][0]][0:length] = tokenres[0:length]
mask[index - entpair2scope[onedicid][0]][0:length] = 1
h_pos[index - entpair2scope[onedicid][0]] = min(headpos, max_sen_length_for_select - 1)
t_pos[index - entpair2scope[onedicid][0]] = min(tailpos, max_sen_length_for_select - 1)
# onedatatoken, onedatahead, onedatatail
length = min(len(onedatatoken), max_sen_length_for_select)
input_ids[thispairnum][0:length] = onedatatoken[0:length]
mask[thispairnum][0:length] = 1
h_pos[thispairnum] = min(onedatahead, max_sen_length_for_select - 1)
t_pos[thispairnum] = min(onedatatail, max_sen_length_for_select - 1)
###cal score
# print(input_ids)
# print(mask)
input_ids = torch.from_numpy(input_ids).to(config["device"])
mask = torch.from_numpy(mask).to(config["device"])
h_pos = torch.from_numpy(h_pos).to(config["device"])
t_pos = torch.from_numpy(t_pos).to(config["device"])
outputs = SimModel(input_ids, mask)
indice = torch.arange(input_ids.size()[0])
h_state = outputs[0][indice, h_pos]
t_state = outputs[0][indice, t_pos]
state = torch.cat((h_state, t_state), 1)
# print(state.shape)
query = state[thispairnum, :].view(1, state.shape[-1])
toselect = state[0:thispairnum, :].view(thispairnum, state.shape[-1])
if ifnorm:
#print("norm")
querynorm = query / query.norm(dim=1)[:, None]
toselectnorm = toselect / toselect.norm(dim=1)[:, None]
res = (querynorm * toselectnorm).sum(-1)
#print(res)
else:
res = (query * toselect).sum(-1)
# print(res)
pred = []
for i in range(res.size(0)):
pred.append((res[i], i))
pred.sort(key=lambda x: x[0], reverse=True)
# print(pred)
# print(res.shape)
# print(res)
####select from pred
selectedindex = []
tmpselectnum = 0
prescore= -100.0
for k in range(len(pred)):
thistext = " ".join(list_data[entpair2scope[onedicid][0] + pred[k][1]]["tokens"])
if thistext == text:
continue
#if tmpselectnum < topk and pred[k][0] > select_thredsold and pred[k][0] != prescore:
if tmpselectnum < topk and pred[k][0] > select_thredsold:
selectedindex.append(pred[k][1])
prescore = pred[k][0]
tmpselectnum += 1
#print("tmpselectnum: ",tmpselectnum)
for onenum in selectedindex:
oneres = list_data[entpair2scope[onedicid][0] + onenum]
onelabel = label
oneneg = [label]
onesen = " ".join(oneres["tokens"])
hposstart = oneres["h"]["pos"][0][0]
hposend = oneres["h"]["pos"][0][-1]
tposstart = oneres["t"]["pos"][0][0]
tposend = oneres["t"]["pos"][0][-1]
tokens, headpos, tailpos = handletoken(onesen, [hposstart, hposend], [tposstart, tposend],
tokenizer)
length = min(len(tokens), max_sen_lstm_tokenize)
if (len(tokens) > max_sen_lstm_tokenize):
tokens = tokens[:max_sen_lstm_tokenize]
newtokens = []
for i in range(0, length):
newtokens.append(tokens[i])
for i in range(length, max_sen_lstm_tokenize):
newtokens.append(0)
fakefirstent = [554, 555]
fakefirstindex = [0, 1]
fakesecondent = [665, 666]
fakesecondindex = [3, 4]
fakeheadid = "fheadid"
faketailid = "ftailid"
fakerawtext = "fakefake"
typelabel = 1 ###positive sample
mask = []
for i in range(0, length):
mask.append(1)
for i in range(length, max_sen_lstm_tokenize):
mask.append(0)
oneseldata = [onelabel, oneneg, newtokens, fakefirstent, fakefirstindex, fakesecondent, fakesecondindex, fakeheadid, faketailid, fakerawtext, length, typelabel, mask]
selectdata.append(np.asarray(oneseldata))
#selectres.append(list_data[entpair2scope[onedicid][0] + onenum])
alladdnum += tmpselectnum
if onedicid not in entpair2scope or tmpselectnum == 0:
#print("aaaaaaaaaa")
nothas += 1
# print("not in! use fasis")
topuse = select_num
# faissindex
input_ids = np.zeros((1, max_sen_length_for_select), dtype=int)
mask = np.zeros((1, max_sen_length_for_select), dtype=int)
h_pos = np.zeros((1), dtype=int)
t_pos = np.zeros((1), dtype=int)
length = min(len(onedatatoken), max_sen_length_for_select)
input_ids[0][0:length] = onedatatoken[0:length]
mask[0][0:length] = 1
h_pos[0] = min(onedatahead, max_sen_length_for_select - 1)
t_pos[0] = min(onedatatail, max_sen_length_for_select - 1)
input_ids = torch.from_numpy(input_ids).to(config["device"])
mask = torch.from_numpy(mask).to(config["device"])
h_pos = torch.from_numpy(h_pos).to(config["device"])
t_pos = torch.from_numpy(t_pos).to(config["device"])
outputs = SimModel(input_ids, mask)
indice = torch.arange(input_ids.size()[0])
h_state = outputs[0][indice, h_pos]
t_state = outputs[0][indice, t_pos]
state = torch.cat((h_state, t_state), 1)
# print(state.shape)
            # Normalize the query so the inner-product FAISS search behaves like cosine similarity.
            if ifnorm:
                state = state / state.norm(dim=1)[:, None]
query = state.view(1, state.shape[-1]).cpu().detach().numpy()
D, I = faissindex.search(query, topuse)
newtouse = topuse
newadd = 0
for i in range(newtouse):
thisdis = D[0][i]
#print("&&&&&&&&&&&&&&&&&&")
#print(thisdis)
###whether to use this?
if thisdis < 0.96:
continue
newadd += 1
onenum = I[0][i]
onelabel = label
oneneg = [label]
oneres = list_data[onenum]
onesen = " ".join(oneres["tokens"])
hposstart = oneres["h"]["pos"][0][0]
hposend = oneres["h"]["pos"][0][-1]
tposstart = oneres["t"]["pos"][0][0]
tposend = oneres["t"]["pos"][0][-1]
tokens, headpos, tailpos = handletoken(onesen, [hposstart, hposend], [tposstart, tposend],
tokenizer)
length = min(len(tokens), max_sen_lstm_tokenize)
if (len(tokens) > max_sen_lstm_tokenize):
tokens = tokens[:max_sen_lstm_tokenize]
newtokens = []
for i in range(0, length):
newtokens.append(tokens[i])
for i in range(length, max_sen_lstm_tokenize):
newtokens.append(0)
fakefirstent = [554, 555]
fakefirstindex = [0, 1]
fakesecondent = [665, 666]
fakesecondindex = [3, 4]
fakeheadid = "fheadid"
faketailid = "ftailid"
fakerawtext = "fakefake"
typelabel = 1 ###positive sample
mask = []
for i in range(0, length):
mask.append(1)
for i in range(length, max_sen_lstm_tokenize):
mask.append(0)
oneseldata = [onelabel, oneneg, newtokens, fakefirstent, fakefirstindex, fakesecondent, fakesecondindex,
fakeheadid, faketailid, fakerawtext, length, typelabel,mask]
selectdata.append(np.asarray(oneseldata))
alladdnum += newadd
    return selectdata

49f8e03bdce3e6c737e3653ed1439effe4c926dd | 73 | py | Python | model/decoder/__init__.py | rulixiang/ToCo | MIT

from .conv_head import LargeFOV
from .segformer_head import SegFormerHead

b710ba895e8133d5d7ab58be3901f0cc4da156fb | 175 | py | Python | src/rl/core/utils.py | djjh/reinforcement-learning-labs | MIT

import numpy as np
import itertools
def discount_cumsum(rewards, discount):
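    """Return an iterator over the discounted returns G_t = r_t + discount * G_{t+1},
    in the same order as `rewards` (wrap in list() to materialise)."""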
return reversed(list(itertools.accumulate(reversed(rewards), lambda a, b: discount * a + b)))
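
# Illustrative check: list(discount_cumsum([1, 1, 1], 0.5)) == [1.75, 1.5, 1.0],
# since G_2 = 1, G_1 = 1 + 0.5 * 1 = 1.5, and G_0 = 1 + 0.5 * 1.5 = 1.75.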
b74238c8165845cec23ecf92d3a2a2a04663d25b | 4,703 | py | Python | tests/precisions.py | sydp/dfdatetime | Apache-2.0

#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for date and time precision helpers."""
import decimal
import unittest
from dfdatetime import definitions
from dfdatetime import precisions
class DateTimePrecisionHelperTest(unittest.TestCase):
"""Tests for the date time precision helper interface."""
def testCopyMicrosecondsToFractionOfSecond(self):
"""Tests the CopyMicrosecondsToFractionOfSecond function."""
precision_helper = precisions.DateTimePrecisionHelper
with self.assertRaises(NotImplementedError):
precision_helper.CopyMicrosecondsToFractionOfSecond(0)
def testCopyToDateTimeString(self):
"""Tests the CopyToDateTimeString function."""
precision_helper = precisions.DateTimePrecisionHelper
with self.assertRaises(NotImplementedError):
precision_helper.CopyToDateTimeString((2018, 1, 2, 19, 45, 12), 0.5)
class SecondsPrecisionHelperTest(unittest.TestCase):
"""Tests for the seconds precision helper."""
def testCopyMicrosecondsToFractionOfSecond(self):
"""Tests the CopyMicrosecondsToFractionOfSecond function."""
precision_helper = precisions.SecondsPrecisionHelper
fraction_of_second = precision_helper.CopyMicrosecondsToFractionOfSecond(
123456)
self.assertEqual(fraction_of_second, 0.0)
with self.assertRaises(ValueError):
precision_helper.CopyMicrosecondsToFractionOfSecond(-1)
with self.assertRaises(ValueError):
precision_helper.CopyMicrosecondsToFractionOfSecond(1000000)
def testCopyToDateTimeString(self):
"""Tests the CopyToDateTimeString function."""
precision_helper = precisions.SecondsPrecisionHelper
date_time_string = precision_helper.CopyToDateTimeString(
(2018, 1, 2, 19, 45, 12), 0.123456)
self.assertEqual(date_time_string, '2018-01-02 19:45:12')
with self.assertRaises(ValueError):
precision_helper.CopyToDateTimeString((2018, 1, 2, 19, 45, 12), 4.123456)
class MillisecondsPrecisionHelperTest(unittest.TestCase):
"""Tests for the milliseconds precision helper."""
def testCopyMicrosecondsToFractionOfSecond(self):
"""Tests the CopyMicrosecondsToFractionOfSecond function."""
precision_helper = precisions.MillisecondsPrecisionHelper
fraction_of_second = precision_helper.CopyMicrosecondsToFractionOfSecond(
123456)
self.assertEqual(fraction_of_second, decimal.Decimal('0.123'))
with self.assertRaises(ValueError):
precision_helper.CopyMicrosecondsToFractionOfSecond(-1)
with self.assertRaises(ValueError):
precision_helper.CopyMicrosecondsToFractionOfSecond(1000000)
def testCopyToDateTimeString(self):
"""Tests the CopyToDateTimeString function."""
precision_helper = precisions.MillisecondsPrecisionHelper
date_time_string = precision_helper.CopyToDateTimeString(
(2018, 1, 2, 19, 45, 12), 0.123456)
self.assertEqual(date_time_string, '2018-01-02 19:45:12.123')
with self.assertRaises(ValueError):
precision_helper.CopyToDateTimeString((2018, 1, 2, 19, 45, 12), 4.123456)
class MicrosecondsPrecisionHelperTest(unittest.TestCase):
"""Tests for the milliseconds precision helper."""
def testCopyMicrosecondsToFractionOfSecond(self):
"""Tests the CopyMicrosecondsToFractionOfSecond function."""
precision_helper = precisions.MicrosecondsPrecisionHelper
fraction_of_second = precision_helper.CopyMicrosecondsToFractionOfSecond(
123456)
self.assertEqual(fraction_of_second, decimal.Decimal('0.123456'))
with self.assertRaises(ValueError):
precision_helper.CopyMicrosecondsToFractionOfSecond(-1)
with self.assertRaises(ValueError):
precision_helper.CopyMicrosecondsToFractionOfSecond(1000000)
def testCopyToDateTimeString(self):
"""Tests the CopyToDateTimeString function."""
precision_helper = precisions.MicrosecondsPrecisionHelper
date_time_string = precision_helper.CopyToDateTimeString(
(2018, 1, 2, 19, 45, 12), 0.123456)
self.assertEqual(date_time_string, '2018-01-02 19:45:12.123456')
with self.assertRaises(ValueError):
precision_helper.CopyToDateTimeString((2018, 1, 2, 19, 45, 12), 4.123456)
class PrecisionHelperFactoryTest(unittest.TestCase):
"""Tests for the date time precision helper factory."""
def testCreatePrecisionHelper(self):
"""Tests the CreatePrecisionHelper function."""
precision_helper = precisions.PrecisionHelperFactory.CreatePrecisionHelper(
definitions.PRECISION_1_MICROSECOND)
self.assertIsNotNone(precision_helper)
with self.assertRaises(ValueError):
precisions.PrecisionHelperFactory.CreatePrecisionHelper('bogus')
if __name__ == '__main__':
unittest.main()
3f79d450f20cbfb54af862f1bc635e21cea416e1 | 71 | py | Python | tanda/generator/__init__.py | vishalbelsare/tanda | MIT

from .generator import GRUGenerator, LSTMGenerator, MeanFieldGenerator
3fa587b39acaaaf6c59a7c75c5c33b8d2fd55c18 | 3,894 | py | Python | supra/Geminus/geminusSearch.py | wmpg/Supracenter | MIT
from supra.Geminus.overpressure2 import overpressureihmod_Ro
from supra.GUI.Tools.GUITools import *
from supra.Utils.pso import pso
SWARM_SIZE = 5
MAX_ITER = 25
def overpressureErr(Ro, *args):
""" Function to optimize for period or pressure
"""
source_list, stat, v, theta, dphi, sounding_pres, sw, wind, dopplershift, target, mode, regime = args
tau, tauws, Z, sR, inc, talt, dpws, dp, it = overpressureihmod_Ro(source_list, stat, Ro[0], v, theta, dphi, sounding_pres, sw, wind=wind, dopplershift=dopplershift)
if mode == 'pres':
if regime == 'ws':
err = abs(target - dpws[-1])
else:
err = abs(target - dp[-1])
else:
if regime == 'ws':
err = abs(target - tauws[-1])
else:
err = abs(target - tau[-1])
return err
def periodSearch(p, gem_inputs, paths=False):
''' Uses PSO to find the Relaxation radius that returns the desired period through the Geminus program
'''
Ro = 10.0
target_period = p
period_ws = 0
tol = 1e-3
tau = []
search_min = [tol]
search_max = [100]
Ro, f_opt = pso(overpressureErr, search_min, search_max, \
args=gem_inputs + [p, 'period', 'ws'], swarmsize=SWARM_SIZE, maxiter=MAX_ITER, processes=1, minfunc=tol, minstep=1e-3)
Ro = Ro[0]
print("Period Weak Shock: {:.2f} s Ro = {:.3f} m".format(p, Ro))
Ro_ws = Ro
Ro = 10.0
period_lin = 0
Ro, f_opt = pso(overpressureErr, search_min, search_max, \
args=gem_inputs + [p, 'period', 'lin'], swarmsize=SWARM_SIZE, maxiter=MAX_ITER, processes=1, minfunc=tol, minstep=1e-3)
Ro = Ro[0]
print("Period Linear: {:.2f} s Ro = {:.3f} m".format(p, Ro))
Ro_lin = Ro
if paths:
source_list, stat, v, theta, dphi, sounding_pres, sw, wind, dopplershift = gem_inputs
tau, tauws, Z, sR, inc, talt, dpws, dp, it = overpressureihmod_Ro(source_list, stat, Ro_ws, v, theta, dphi, sounding_pres, sw, wind=wind, dopplershift=dopplershift)
weak_path = tau[:it] + tauws[it:]
tau, tauws, Z, sR, inc, talt, dpws, dp, it = overpressureihmod_Ro(source_list, stat, Ro_lin, v, theta, dphi, sounding_pres, sw, wind=wind, dopplershift=dopplershift)
lin_path = tau
return Ro_ws, Ro_lin, weak_path, lin_path, tau, Z, it
return Ro_ws, Ro_lin
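
# Minimal usage sketch (illustrative -- `inputs` stands for the nine-element
# gem_inputs list [source_list, stat, v, theta, dphi, sounding_pres, sw, wind,
# dopplershift] assembled by the caller):
#
#   Ro_ws, Ro_lin = periodSearch(0.5, inputs)    # relaxation radii matching a 0.5 s period
#   Ro_ws, Ro_lin = presSearch(0.02, inputs)     # radii matching a 20 mPa overpressure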
def presSearch(p, gem_inputs, paths=False):
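    ''' Uses PSO to find the relaxation radius that returns the desired overpressure
    through the Geminus program (overpressure counterpart of periodSearch).
    '''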
Ro = 10.0
target_pres = p
pres_ws = 0
tol = 1e-3
source_list, stat, v, theta, dphi, sounding_pres, sw, wind, dopplershift = gem_inputs
search_min = [tol]
search_max = [100]
Ro, f_opt = pso(overpressureErr, search_min, search_max, \
args=gem_inputs + [p, 'pres', 'ws'], swarmsize=SWARM_SIZE, maxiter=MAX_ITER, processes=1, minfunc=tol, minstep=1e-3)
Ro = Ro[0]
print("Pressure Weak Shock: {:.2f} mPa Ro = {:.3f} m".format(p*1000, Ro))
Ro_ws = Ro
Ro = 10.0
pres_lin = 0
Ro, f_opt = pso(overpressureErr, search_min, search_max, \
args=gem_inputs + [p, 'pres', 'lin'], swarmsize=SWARM_SIZE, maxiter=MAX_ITER, processes=1, minfunc=tol, minstep=1e-3)
Ro = Ro[0]
print("Pressure Linear: {:.2f} mPa Ro = {:.3f} m".format(p*1000, Ro))
Ro_lin = Ro
if paths:
source_list, stat, v, theta, dphi, sounding_pres, sw, wind, dopplershift = gem_inputs
tau, tauws, Z, sR, inc, talt, dpws, dp, it = overpressureihmod_Ro(source_list, stat, Ro_ws, v, theta, dphi, sounding_pres, sw, wind=wind, dopplershift=dopplershift)
weak_path = tau[:it] + tauws[it:]
tau, tauws, Z, sR, inc, talt, dpws, dp, it = overpressureihmod_Ro(source_list, stat, Ro_lin, v, theta, dphi, sounding_pres, sw, wind=wind, dopplershift=dopplershift)
lin_path = tau
return Ro_ws, Ro_lin, weak_path, lin_path, tau, Z, it
return Ro_ws, Ro_lin
if __name__ == '__main__':
    pass

3fabb8f64f1a0ec1780453c570c4d4d3b17bd3b1 | 14,326 | py | Python | tests/unit_test/data_processor/history_test.py | digiteinfotech/kairon | Apache-2.0

import json
import os
from datetime import datetime
import pytest
from mongomock import MongoClient
from pymongo.collection import Collection
from pymongo.errors import ServerSelectionTimeoutError
from kairon.exceptions import AppException
from kairon.history.processor import HistoryProcessor
from kairon.shared.utils import Utility
class TestHistory:
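    """Unit tests for HistoryProcessor, backed by mongomock clients and fixtures."""
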
@pytest.fixture(autouse=True)
def init_connection(self):
os.environ["system_file"] = "./tests/testing_data/tracker.yaml"
Utility.load_environment()
def history_conversations(self, *args, **kwargs):
json_data = json.load(
open("tests/testing_data/history/conversations_history.json")
)
return json_data[0]['events'], None
def get_history_conversations(self):
json_data = json.load(
open("tests/testing_data/history/conversations_history.json")
)
for event in json_data[0]['events'][15:]:
event['timestamp'] = datetime.utcnow().timestamp()
return json_data[0], None
@pytest.fixture
def mock_db_timeout(self, monkeypatch):
def _mock_db_timeout(*args, **kwargs):
raise ServerSelectionTimeoutError('Failed to connect')
monkeypatch.setattr(Collection, 'aggregate', _mock_db_timeout)
monkeypatch.setattr(Collection, 'find', _mock_db_timeout)
@pytest.fixture
def mock_fallback_user_data(self, monkeypatch):
def db_client(*args, **kwargs):
client = MongoClient(Utility.environment['tracker']['url'])
db = client.get_database("conversation")
conversations = db.get_collection("conversations")
history, _ = self.get_history_conversations()
            conversations.insert_one(history)
return client, 'Loading host:mongodb://test_kairon:27016, db:conversation, collection:conversations '
monkeypatch.setattr(HistoryProcessor, "get_mongo_connection", db_client)
@pytest.fixture
def mock_mongo_client(self, monkeypatch):
def db_client(*args, **kwargs):
client = MongoClient(Utility.environment['tracker']['url'])
db = client.get_database("conversation")
conversations = db.get_collection("conversations")
history, _ = self.history_conversations()
conversations.insert_many(history)
return client, 'Loading host:mongodb://test_kairon:27016, db:conversation, collection:conversations'
monkeypatch.setattr(HistoryProcessor, "get_mongo_connection", db_client)
def test_fetch_chat_users_db_error(self, mock_db_timeout):
with pytest.raises(AppException) as e:
users = HistoryProcessor.fetch_chat_users(collection="tests")
assert len(users) == 0
assert str(e).__contains__('Could not connect to tracker: ')
def test_fetch_chat_users(self, mock_mongo_client):
users = HistoryProcessor.fetch_chat_users(collection="tests")
assert len(users) == 2
def test_fetch_chat_users_empty(self, mock_mongo_client):
users = HistoryProcessor.fetch_chat_users(collection="tests")
assert len(users) == 2
def test_fetch_chat_history_error(self, mock_db_timeout):
with pytest.raises(AppException):
history, message = HistoryProcessor.fetch_chat_history(sender="123", collection="tests")
assert len(history) == 0
assert message
def test_fetch_chat_history_empty(self, mock_mongo_client):
history, message = HistoryProcessor.fetch_chat_history(sender="123", collection="tests")
assert len(history) == 0
assert message
def test_fetch_chat_history(self, monkeypatch):
def events(*args, **kwargs):
json_data = json.load(open("tests/testing_data/history/conversation.json"))
return json_data['events'], 'Loading host:mongodb://test_kairon:27016, db:conversation, ' \
'collection:conversations '
monkeypatch.setattr(HistoryProcessor, "fetch_user_history", events)
history, message = HistoryProcessor.fetch_chat_history(
sender="5e564fbcdcf0d5fad89e3acd", collection="tests"
)
assert len(history) == 12
assert history[0]["event"]
assert history[0]["time"]
assert history[0]["date"]
assert history[0]["text"]
assert history[0]["intent"]
assert history[0]["confidence"]
assert message
def test_visitor_hit_fallback_error(self, mock_db_timeout):
hit_fall_back, message = HistoryProcessor.visitor_hit_fallback("tests")
assert hit_fall_back["fallback_count"] == 0
assert hit_fall_back["total_count"] == 0
assert message
def test_visitor_hit_fallback(self, mock_fallback_user_data, monkeypatch):
hit_fall_back, message = HistoryProcessor.visitor_hit_fallback("conversations")
assert hit_fall_back["fallback_count"] == 1
assert hit_fall_back["total_count"] == 4
assert message
def test_visitor_hit_fallback_action_not_configured(self, mock_fallback_user_data, monkeypatch):
hit_fall_back, message = HistoryProcessor.visitor_hit_fallback("conversations")
assert hit_fall_back["fallback_count"] == 1
assert hit_fall_back["total_count"] == 4
assert message
def test_visitor_hit_fallback_custom_action(self, mock_fallback_user_data):
hit_fall_back, message = HistoryProcessor.visitor_hit_fallback("conversations",
fallback_action='utter_location_query')
assert hit_fall_back["fallback_count"] == 1
assert hit_fall_back["total_count"] == 4
assert message
def test_visitor_hit_fallback_nlu_fallback_configured(self, mock_fallback_user_data):
hit_fall_back, message = HistoryProcessor.visitor_hit_fallback("conversations",
fallback_action="action_default_fallback",
nlu_fallback_action="utter_please_rephrase")
assert hit_fall_back["fallback_count"] == 2
assert hit_fall_back["total_count"] == 4
assert message
def test_conversation_time_error(self, mock_db_timeout):
conversation_time, message = HistoryProcessor.conversation_time("tests")
assert not conversation_time
assert message
def test_conversation_time_empty(self, mock_mongo_client):
conversation_time, message = HistoryProcessor.conversation_time("tests")
assert not conversation_time
assert message
def test_conversation_time(self, mock_mongo_client):
conversation_time, message = HistoryProcessor.conversation_time("tests")
assert conversation_time == []
assert message
def test_conversation_steps_error(self, mock_db_timeout):
conversation_steps, message = HistoryProcessor.conversation_steps("tests")
assert not conversation_steps
assert message
def test_conversation_steps_empty(self, mock_mongo_client):
conversation_steps, message = HistoryProcessor.conversation_steps("tests")
assert not conversation_steps
assert message
def test_conversation_steps(self, mock_mongo_client):
conversation_steps, message = HistoryProcessor.conversation_steps("tests")
assert conversation_steps == []
assert message
def test_user_with_metrics(self, mock_mongo_client):
users, message = HistoryProcessor.user_with_metrics("tests")
assert users == []
assert message
def test_engaged_users_error(self, mock_db_timeout):
engaged_user, message = HistoryProcessor.engaged_users("tests")
assert engaged_user['engaged_users'] == 0
assert message
def test_engaged_users(self, mock_mongo_client):
engaged_user, message = HistoryProcessor.engaged_users("tests")
assert engaged_user['engaged_users'] == 0
assert message
def test_new_user_error(self, mock_db_timeout):
count_user, message = HistoryProcessor.new_users("tests")
assert count_user['new_users'] == 0
assert message
def test_new_user(self, mock_mongo_client):
count_user, message = HistoryProcessor.new_users("tests")
assert count_user['new_users'] == 0
assert message
def test_successful_conversation_error(self, mock_db_timeout):
conversation_steps, message = HistoryProcessor.successful_conversations("tests")
assert conversation_steps['successful_conversations'] == 0
assert message
def test_successful_conversation(self, mock_mongo_client):
conversation_steps, message = HistoryProcessor.successful_conversations("tests")
assert conversation_steps['successful_conversations'] == 0
assert message
def test_user_retention_error(self, mock_db_timeout):
retention, message = HistoryProcessor.user_retention("tests")
assert retention['user_retention'] == 0
assert message
def test_user_retention(self, mock_mongo_client):
retention, message = HistoryProcessor.user_retention("tests")
assert retention['user_retention'] == 0
assert message
def test_engaged_users_range_error(self, mock_db_timeout):
engaged_user, message = HistoryProcessor.engaged_users_range("tests")
assert engaged_user["engaged_user_range"] == {}
assert message
def test_engaged_users_range(self, mock_mongo_client):
engaged_user, message = HistoryProcessor.engaged_users_range("tests")
assert engaged_user["engaged_user_range"] == {}
assert message
def test_new_user_range_error(self, mock_db_timeout):
count_user, message = HistoryProcessor.new_users_range("tests")
assert count_user['new_user_range'] == {}
assert message
def test_new_user_range(self, mock_mongo_client):
count_user, message = HistoryProcessor.new_users_range("tests")
assert count_user['new_user_range'] == {}
assert message
def test_successful_conversation_range_error(self, mock_db_timeout):
conversation_steps, message = HistoryProcessor.successful_conversation_range("tests")
assert conversation_steps["success_conversation_range"] == {}
assert message
def test_successful_conversation_range(self, mock_mongo_client):
conversation_steps, message = HistoryProcessor.successful_conversation_range("tests")
assert conversation_steps["success_conversation_range"] == {}
assert message
def test_user_retention_range_error(self, mock_db_timeout):
retention, message = HistoryProcessor.user_retention_range("tests")
assert retention['retention_range'] == {}
assert message
def test_user_retention_range(self, mock_mongo_client):
retention, message = HistoryProcessor.user_retention_range("tests")
assert retention['retention_range'] == {}
assert message
def test_fallback_range_error(self, mock_db_timeout):
f_count, message = HistoryProcessor.fallback_count_range("tests")
assert f_count["fallback_counts"] == {}
assert message
def test_fallback_range(self, mock_mongo_client):
f_count, message = HistoryProcessor.fallback_count_range("tests")
assert f_count["fallback_counts"] == {}
assert message
def test_flatten_conversation_error(self, mock_db_timeout):
f_count, message = HistoryProcessor.flatten_conversations("tests")
assert f_count["conversation_data"] == []
assert message
def test_flatten_conversation_range(self, mock_mongo_client):
f_count, message = HistoryProcessor.flatten_conversations("tests")
assert f_count["conversation_data"] == []
assert message
def test_total_conversation_range_error(self, mock_db_timeout):
conversation_steps, message = HistoryProcessor.total_conversation_range("tests")
assert conversation_steps["total_conversation_range"] == {}
assert message
def test_total_conversation_range(self, mock_mongo_client):
conversation_steps, message = HistoryProcessor.total_conversation_range("tests")
assert conversation_steps["total_conversation_range"] == {}
assert message
def test_top_intent_error(self, mock_db_timeout):
with pytest.raises(Exception):
HistoryProcessor.top_n_intents("tests")
def test_top_intent(self, mock_mongo_client):
top_n, message = HistoryProcessor.top_n_intents("tests")
assert top_n == []
assert message
def test_top_action_error(self, mock_db_timeout):
with pytest.raises(Exception):
HistoryProcessor.top_n_actions("tests")
def test_top_action(self, mock_mongo_client):
top_n, message = HistoryProcessor.top_n_actions("tests")
assert top_n == []
assert message
def test_conversation_step_range_error(self, mock_db_timeout):
conversation_steps, message = HistoryProcessor.average_conversation_step_range("tests")
assert conversation_steps["Conversation_step_range"] == {}
assert message
def test_conversation_step_range(self, mock_mongo_client):
conversation_steps, message = HistoryProcessor.average_conversation_step_range("tests")
assert conversation_steps["Conversation_step_range"] == {}
assert message
def test_wordcloud_error(self, mock_db_timeout):
with pytest.raises(Exception):
HistoryProcessor.word_cloud("tests")
def test_wordcloud(self, mock_mongo_client):
conversation, message = HistoryProcessor.word_cloud("tests")
assert conversation == ""
assert message
def test_wordcloud_data(self, mock_fallback_user_data):
conversation, message = HistoryProcessor.word_cloud("conversations")
assert conversation
assert message
def test_wordcloud_data_error(self, mock_fallback_user_data):
with pytest.raises(Exception):
HistoryProcessor.word_cloud("conversations", u_bound=.5, l_bound=.6)
b20eedb63e7c4cd0b2bbcbf625af76ebb0a546af | 70 | py | Python | vcv/pyimagesearch/alignment/__init__.py | mhudnell/vaccination-card-verification | MIT

# import the necessary packages
from .align_images import align_images

b75573a3194017c104f6fea61214e7db3b894a28 | 2,233 | py | Python | tests/test_comparaison.py | cohenjer/PIRS8 | MIT
import numpy as np
from src._comparaison import comparaison, compar_time
def test_comparaison(i):
"""
Test of comparaison
    Note: the plot x/y labels and title must be adjusted inside comparaison before running the test.
Parameters
----------
i : int
i=1, plot data fitting error for simple case.
i=2, plot factor error for simple case.
i=3, plot data fitting error for complicated case.
i=4, plot factor error for complicated case.
Returns
-------
None.
"""
I=50
J=50
K=50
r=10 # rank
n_samples=int(10*r*np.log(r)+1) # nb of randomized samples
nb_rand=10 # nb of random initialization
if i ==1 :
# simple case data fitting error
comparaison(I,J,K,r,nb_rand,n_samples)
if i ==2 :
# simple case factors error
comparaison(I,J,K,r,nb_rand,n_samples,False,list_factors=True)
if i == 3 :
# complicated case data fitting error
comparaison(I,J,K,r,nb_rand,n_samples,scale=True)
if i==4 :
# complicated case factors error
comparaison(I,J,K,r,nb_rand,n_samples,False,list_factors=True,scale=True)
def test_compar_time(i):
"""
Test of compar_time
    Note: the plot x/y labels and title must be adjusted inside compar_time before running the test.
Parameters
----------
i : int
i=1, plot data fitting error for simple case.
i=2, plot factor error for simple case.
i=3, plot data fitting error for complicated case.
i=4, plot factor error for complicated case.
Returns
-------
None.
"""
I=50
J=50
K=50
r=10 # rank
n_samples=int(10*r*np.log(r)+1) # nb of randomized samples
nb_rand=10 # nb of random initialization
if i==1:
# simple case data fitting error
compar_time(I,J,K,r,nb_rand,n_samples)
if i==2:
# simple case factors error
compar_time(I,J,K,r,nb_rand,n_samples,list_factors=True)
if i==3:
# complicated case data fitting error
compar_time(I,J,K,r,nb_rand,n_samples,scale=True)
if i==4:
# complicated case factors error
compar_time(I,J,K,r,nb_rand,n_samples,list_factors=True,scale=True)
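
# Example invocations (illustrative):
#   test_comparaison(1)   # data-fitting error, simple case
#   test_compar_time(3)   # data-fitting error, complicated case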
| 26.583333 | 81 | 0.610837 | 352 | 2,233 | 3.775568 | 0.173295 | 0.060196 | 0.096313 | 0.024078 | 0.886381 | 0.872837 | 0.872837 | 0.872837 | 0.872837 | 0.872837 | 0 | 0.026415 | 0.287953 | 2,233 | 83 | 82 | 26.903614 | 0.809434 | 0.476041 | 0 | 0.625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4d33a38a6214759459ddc5a33d25736119870762 | 56 | py | Python | solutions/polynomial_2.py | adityarn/MAV110_PythonModule | c3ee6457ba0e4d2cae04f3f6a138d0b473bb4f8e | [
"MIT"
] | 2 | 2021-11-25T13:08:27.000Z | 2021-11-25T13:08:30.000Z | solutions/polynomial_2.py | adityarn/MAV110_PythonModule | c3ee6457ba0e4d2cae04f3f6a138d0b473bb4f8e | [
"MIT"
] | null | null | null | solutions/polynomial_2.py | adityarn/MAV110_PythonModule | c3ee6457ba0e4d2cae04f3f6a138d0b473bb4f8e | [
"MIT"
] | null | null | null | def poly(x, a=2, b=3, c=10):
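    # Evaluates the quadratic a*x**2 + b*x + c; e.g. poly(2) = 2*4 + 3*2 + 10 = 24.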
return a*x**2 + b*x + c | 28 | 28 | 0.482143 | 16 | 56 | 1.6875 | 0.625 | 0.148148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.119048 | 0.25 | 56 | 2 | 29 | 28 | 0.52381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
4d9da731d898bd130689e6d8d0e632af850e06f0 | 19,104 | py | Python | tests/settree_sanity.py | ranigb/Set-Tree | fa3971f9a8ef98dbfd0f6de654efcde3006a197b | [
"MIT"
] | 17 | 2021-07-26T01:03:59.000Z | 2022-01-23T10:31:56.000Z | tests/settree_sanity.py | ranigb/Set-Tree | fa3971f9a8ef98dbfd0f6de654efcde3006a197b | [
"MIT"
] | 2 | 2021-12-10T09:53:48.000Z | 2022-01-25T17:08:41.000Z | tests/settree_sanity.py | ranigb/Set-Tree | fa3971f9a8ef98dbfd0f6de654efcde3006a197b | [
"MIT"
] | 3 | 2021-09-14T11:39:35.000Z | 2022-01-23T06:51:48.000Z | import os
import numpy as np
import random
import unittest
from timeit import default_timer as timer
from datetime import timedelta
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import fetch_openml
from settree.set_data import SetDataset, set_object_to_matrix
from settree.set_tree import SetTree
from settree.set_rf import SetRandomForestClassifier
from exps.eval_utils import split_to_random_sets
def get_first_quarter_data(num_samples, min_items_set=2, max_items_set=10, dim=2):
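    """Synthetic binary task (inferred from the code below): class-1 sets contain
    at least one point in the first quadrant (all coordinates positive), while
    class-0 sets contain none.
    """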
def inject_samples_in_first_quarter(set_of_samples, min=1, max=1, dim=2):
num = random.choice(range(min, max + 1))
pos_points = np.random.uniform(low=0, high=1, size=(num, dim))
set_of_samples[:num, :] = pos_points
return set_of_samples
def sample_point_not_from_first_quarter(dim=2):
# sample a quarter (not the first)
while True:
r = np.random.normal(0, 1, dim) > 0
if sum(r) < dim:
break
# sample a point from the quarter
p = []
for i in r:
# pos
if i:
p.append(np.random.uniform(low=0, high=1))
# neg
else:
p.append(np.random.uniform(low=-1, high=0))
return tuple(p)
def sample_set(num, dim):
return np.stack([sample_point_not_from_first_quarter(dim) for _ in range(num)])
s_1 = [sample_set(random.choice(range(min_items_set, max_items_set)), dim) for _ in range(num_samples // 2)]
s_2 = [sample_set(random.choice(range(min_items_set, max_items_set)), dim) for _ in range(num_samples // 2)]
s_2 = [inject_samples_in_first_quarter(i, min=1, max=1, dim=dim) for i in s_2]
data = s_1 + s_2
y = np.concatenate([np.zeros(len(s_1)), np.ones(len(s_2))]).astype(int)
indx = np.arange(len(y))
random.shuffle(indx)
return [data[i] for i in indx], y[indx]
def get_data_rect_vs_diag(num_samples, min_items_set, max_items_set, dim=2):
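    """Synthetic binary task (inferred from the code below): class-0 sets are
    sampled uniformly from the unit hyper-rectangle, while every point of a
    class-1 set lies on the main diagonal (all coordinates equal).
    """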
def sample_set_rect(set_size, arange=(0,1), dim=2):
return np.random.uniform(low=arange[0], high=arange[1], size=(set_size, dim))
def sample_set_diag(set_size, arange=(0,1), dim=2):
p = np.random.uniform(low=arange[0], high=arange[1], size=set_size)
return np.repeat(p.reshape(-1, 1), dim, axis=1)
s_1 = [sample_set_rect(random.choice(range(min_items_set, max_items_set)), (0, 1), dim)
for _ in range(num_samples // 2)]
s_2 = [sample_set_diag(random.choice(range(min_items_set, max_items_set)), (0, 1), dim)
for _ in range(num_samples // 2)]
data = s_1 + s_2
y = np.concatenate([np.zeros(len(s_1)), np.ones(len(s_2))]).astype(int)
indx = np.arange(len(y))
random.shuffle(indx)
return np.array(data)[indx].tolist(), y[indx]
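# Minimal usage sketch (comments only): both generators return a list of
# (set_size, dim) numpy arrays together with a matching 0/1 label vector, e.g.
#   data, y = get_first_quarter_data(num_samples=10, min_items_set=2, max_items_set=5)
#   assert len(data) == len(y) == 10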
class TestToyProblems(unittest.TestCase):
test_counter = 1
    def __init__(self, splitter='set', use_attention_set=True, attention_set_limit=1, use_attention_set_comp=True):
        # A custom __init__ on a TestCase subclass must still initialize the base class.
        super().__init__()
        self.tree_args = {'splitter': splitter,
'use_attention_set': use_attention_set,
'use_attention_set_comp': use_attention_set_comp,
'attention_set_limit': attention_set_limit}
print('Test args: {}'.format(self.tree_args))
def init(self, name):
np.random.seed(42)
random.seed(42)
print('####################({})####################'.format(self.test_counter))
print('Start test: {}'.format(name))
self.test_counter += 1
def start_timer(self):
self.start = timer()
def end_timer(self):
end = timer()
print('Time: {}'.format(timedelta(seconds=end - self.start)))
def end(self):
print('############################################\n')
def first_quarter(self):
self.init('first_quarter')
set_size = 10
train_data, train_y = get_first_quarter_data(num_samples=1000,
min_items_set=set_size,
max_items_set=set_size + 1,
dim=2)
test_data, test_y = get_first_quarter_data(num_samples=1000,
min_items_set=set_size,
max_items_set=set_size + 1,
dim=2)
ds_train = SetDataset(records=train_data, is_init=True)
ds_test = SetDataset(records=test_data, is_init=True)
dt = SetTree(**self.tree_args)
self.start_timer()
dt.fit(ds_train, train_y)
self.end_timer()
train_acc = (dt.predict(ds_train) == train_y).mean()
test_acc = (dt.predict(ds_test) == test_y).mean()
print('Results : set_size={} | train acc {:.4f} | test acc : {:.4f}'.format(set_size,train_acc, test_acc))
print(dt)
self.end()
self.assertGreaterEqual(test_acc, 0.95)
def first_quarter_high_dim(self):
self.init('first_quarter_high_dim')
set_size = 10
train_data, train_y = get_first_quarter_data(num_samples=5000,
min_items_set=set_size,
max_items_set=set_size + 1,
dim=4)
test_data, test_y = get_first_quarter_data(num_samples=1000,
min_items_set=set_size,
max_items_set=set_size + 1,
dim=4)
ds_train = SetDataset(records=train_data, is_init=True)
ds_test = SetDataset(records=test_data, is_init=True)
dt = SetTree(**self.tree_args)
self.start_timer()
dt.fit(ds_train, train_y)
self.end_timer()
train_acc = (dt.predict(ds_train) == train_y).mean()
test_acc = (dt.predict(ds_test) == test_y).mean()
print('Results : set_size={} | train acc {:.4f} | test acc : {:.4f}'.format(set_size, train_acc, test_acc))
print(dt)
self.end()
self.assertGreaterEqual(test_acc, 0.9)
def first_quarter_high_dim_varying_lengths(self):
self.init('first_quarter_high_dim_varying_lengths')
set_size = 10
train_data, train_y = get_first_quarter_data(num_samples=5000,
min_items_set=5,
max_items_set=15,
dim=4)
test_data, test_y = get_first_quarter_data(num_samples=1000,
min_items_set=5,
max_items_set=15,
dim=4)
ds_train = SetDataset(records=train_data, is_init=True)
ds_test = SetDataset(records=test_data, is_init=True)
dt = SetTree(**self.tree_args)
self.start_timer()
dt.fit(ds_train, train_y)
self.end_timer()
train_acc = (dt.predict(ds_train) == train_y).mean()
test_acc = (dt.predict(ds_test) == test_y).mean()
print('Results : set_size={} | train acc {:.4f} | test acc : {:.4f}'.format(set_size, train_acc, test_acc))
print(dt)
self.end()
self.assertGreaterEqual(test_acc, 0.95)
def first_quarter_vs_sklearn(self):
self.init('first_quarter_vs_sklearn')
set_size = 10
train_data, train_y = get_first_quarter_data(num_samples=1000,
min_items_set=set_size,
max_items_set=set_size + 1,
dim=2)
test_data, test_y = get_first_quarter_data(num_samples=1000,
min_items_set=set_size,
max_items_set=set_size + 1,
dim=2)
ds_train = SetDataset(records=train_data, is_init=True)
ds_test = SetDataset(records=test_data, is_init=True)
dt = SetTree(**self.tree_args)
self.start_timer()
dt.fit(ds_train, train_y)
self.end_timer()
train_acc = (dt.predict(ds_train) == train_y).mean()
test_acc = (dt.predict(ds_test) == test_y).mean()
print('Results : set_size={} | train acc {:.4f} | test acc : {:.4f}'.format(set_size, train_acc, test_acc))
print(dt)
sklearn_dt = DecisionTreeClassifier(criterion="entropy")
sk_train_x = set_object_to_matrix(ds_train, dt.operations)
sk_test_x = set_object_to_matrix(ds_test, dt.operations)
sklearn_dt = sklearn_dt.fit(sk_train_x, train_y)
sklearn_train_acc = (sklearn_dt.predict(sk_train_x) == train_y).mean()
sklearn_test_acc = (sklearn_dt.predict(sk_test_x) == test_y).mean()
print('Results sklearn: set_size={} | train acc {:.4f} | test acc : {:.4f}'.format(set_size, sklearn_train_acc, sklearn_test_acc))
print('Tree structure (depth, n_nodes): sklearn: ({}, {}) setDT: ({}, {})'.format(sklearn_dt.get_depth(),
sklearn_dt.tree_.node_count,
dt.depth, dt.n_nodes))
self.end()
self.assertGreaterEqual(test_acc, sklearn_test_acc)
def rect_vs_diagonal(self):
self.init('rect_vs_diagonal')
set_size=10
train_data, train_y = get_data_rect_vs_diag(num_samples=1000,
min_items_set=set_size,
max_items_set=set_size + 1,
dim=2)
test_data, test_y = get_data_rect_vs_diag(num_samples=1000,
min_items_set=set_size,
max_items_set=set_size + 1,
dim=2)
ds_train = SetDataset(records=train_data, is_init=True)
ds_test = SetDataset(records=test_data, is_init=True)
dt = SetTree(**self.tree_args)
self.start_timer()
dt.fit(ds_train, train_y)
self.end_timer()
train_acc = (dt.predict(ds_train) == train_y).mean()
test_acc = (dt.predict(ds_test) == test_y).mean()
print('Results : set_size={} | train acc {:.4f} | test acc : {:.4f}'.format(set_size, train_acc, test_acc))
print(dt)
self.end()
self.assertGreaterEqual(test_acc, 0.95)
def rect_vs_diagonal_high_dim(self):
self.init('rect_vs_diagonal_high_dim')
set_size = 10
train_data, train_y = get_data_rect_vs_diag(num_samples=5000,
min_items_set=set_size,
max_items_set=set_size + 1,
dim=8)
test_data, test_y = get_data_rect_vs_diag(num_samples=1000,
min_items_set=set_size,
max_items_set=set_size + 1,
dim=8)
ds_train = SetDataset(records=train_data, is_init=True)
ds_test = SetDataset(records=test_data, is_init=True)
dt = SetTree(**self.tree_args)
self.start_timer()
dt.fit(ds_train, train_y)
self.end_timer()
train_acc = (dt.predict(ds_train) == train_y).mean()
test_acc = (dt.predict(ds_test) == test_y).mean()
print('Results : set_size={} | train acc {:.4f} | test acc : {:.4f}'.format(set_size, train_acc, test_acc))
print(dt)
self.end()
self.assertGreaterEqual(test_acc, 0.95)
def rect_vs_diagonal_vs_sklearn(self):
self.init('rect_vs_diagonal_vs_sklearn')
set_size = 10
train_data, train_y = get_data_rect_vs_diag(num_samples=1000,
min_items_set=set_size,
max_items_set=set_size + 1,
dim=2)
test_data, test_y = get_data_rect_vs_diag(num_samples=1000,
min_items_set=set_size,
max_items_set=set_size + 1,
dim=2)
ds_train = SetDataset(records=train_data, is_init=True)
ds_test = SetDataset(records=test_data, is_init=True)
dt = SetTree(**self.tree_args)
self.start_timer()
dt.fit(ds_train, train_y)
self.end_timer()
train_acc = (dt.predict(ds_train) == train_y).mean()
test_acc = (dt.predict(ds_test) == test_y).mean()
print('Results : set_size={} | train acc {:.4f} | test acc : {:.4f}'.format(set_size, train_acc, test_acc))
print(dt)
sklearn_dt = DecisionTreeClassifier(criterion="entropy")
sk_train_x = set_object_to_matrix(ds_train, dt.operations)
sk_test_x = set_object_to_matrix(ds_test, dt.operations)
sklearn_dt = sklearn_dt.fit(sk_train_x, train_y)
sklearn_train_acc = (sklearn_dt.predict(sk_train_x) == train_y).mean()
sklearn_test_acc = (sklearn_dt.predict(sk_test_x) == test_y).mean()
print('Results sklearn: set_size={} | train acc {:.4f} | test acc : {:.4f}'.format(set_size,
sklearn_train_acc,
sklearn_test_acc))
print('Tree structure (depth, n_nodes): sklearn: ({}, {}) setDT: ({}, {})'.format(sklearn_dt.get_depth(),
sklearn_dt.tree_.node_count,
dt.depth, dt.n_nodes))
self.end()
self.assertGreaterEqual(test_acc, sklearn_test_acc)
def classify_mnist(self):
self.init('classify_mnist')
X, y = fetch_openml('mnist_784', version=1, return_X_y=True,
data_home=os.path.join(os.path.abspath('__file__' + '/../../'), 'data'))
y = y.astype(int)
X_0 = X[y == 0, :]
X_1 = X[y == 9, :]
X_2 = X[y == 8, :]
X_3 = X[y == 6, :]
X_0 = split_to_random_sets(X_0, min_size=2, max_size=30)
X_1 = split_to_random_sets(X_1, min_size=2, max_size=30)
X_2 = split_to_random_sets(X_2, min_size=2, max_size=30)
X_3 = split_to_random_sets(X_3, min_size=2, max_size=30)
split = int(((len(X_0) + len(X_1) + len(X_2) + len(X_3)) / 4) * 0.2)
data = X_0[:split] + X_1[:split] + X_2[:split] + X_3[:split]
train_y = np.array([0] * len(X_0[:split]) + [1] * len(X_1[:split]) + [2] * len(X_2[:split]) + [3] * len(X_3[:split]))
ds_train = SetDataset(records=data, is_init=True)
data = X_0[split:] + X_1[split:] + X_2[split:] + X_3[split:]
test_y = np.array([0] * len(X_0[split:]) + [1] * len(X_1[split:]) + [2] * len(X_2[split:]) + [3] * len(X_3[split:]))
        ds_test = SetDataset(records=data, is_init=True)  # is_init added for consistency with ds_train and the other tests
dt = SetTree(**self.tree_args)
self.start_timer()
dt.fit(ds_train, train_y)
self.end_timer()
train_acc = (dt.predict(ds_train) == train_y).mean()
test_acc = (dt.predict(ds_test) == test_y).mean()
print('Results : train acc {:.4f} | test acc : {:.4f}'.format(train_acc, test_acc))
print(dt)
self.end()
self.assertGreaterEqual(test_acc, 0.93)
def classify_mnist_rf(self):
self.init('classify_mnist_rf')
X, y = fetch_openml('mnist_784', version=1, return_X_y=True,
data_home=os.path.join(os.path.abspath('__file__' + '/../../'), 'data'))
y = y.astype(int)
X_0 = X[y == 0, :]
X_1 = X[y == 9, :]
X_2 = X[y == 8, :]
X_3 = X[y == 6, :]
X_0 = split_to_random_sets(X_0, min_size=2, max_size=30)
X_1 = split_to_random_sets(X_1, min_size=2, max_size=30)
X_2 = split_to_random_sets(X_2, min_size=2, max_size=30)
X_3 = split_to_random_sets(X_3, min_size=2, max_size=30)
split = int(((len(X_0) + len(X_1) + len(X_2) + len(X_3)) / 4) * 0.2)
data = X_0[:split] + X_1[:split] + X_2[:split] + X_3[:split]
train_y = [0] * len(X_0[:split]) + [1] * len(X_1[:split]) + [2] * len(X_2[:split]) + [3] * len(X_3[:split])
ds_train = SetDataset(records=data, is_init=True)
data = X_0[split:] + X_1[split:] + X_2[split:] + X_3[split:]
test_y = [0] * len(X_0[split:]) + [1] * len(X_1[split:]) + [2] * len(X_2[split:]) + [3] * len(X_3[split:])
ds_test = SetDataset(records=data, is_init=True)
dt = SetRandomForestClassifier(n_estimators=4,
criterion="entropy",
max_samples=0.5,
max_depth=6,
max_features="auto",
splitter=self.tree_args['splitter'],
                                       # Dict keys fixed to match self.tree_args; the original looked up
                                       # 'use_active_set'/'active_set_limit', which do not exist there.
                                       use_active_set=self.tree_args['use_attention_set'],
                                       active_set_limit=self.tree_args['attention_set_limit'],
bootstrap=True,
n_jobs=4,
random_state=None,
verbose=3)
self.start_timer()
dt.fit(ds_train, train_y)
self.end_timer()
train_acc = (dt.predict(ds_train) == train_y).mean()
test_acc = (dt.predict(ds_test) == test_y).mean()
print('Results : train acc {:.4f} | test acc : {:.4f}'.format(train_acc, test_acc))
print(dt)
self.end()
self.assertGreaterEqual(test_acc, 0.94)
if __name__ == '__main__':
np.random.seed(42)
toy_tests = TestToyProblems(splitter='sklearn',
use_attention_set=True,
use_attention_set_comp=True,
attention_set_limit=3)
toy_tests.first_quarter()
toy_tests.first_quarter_high_dim()
toy_tests.first_quarter_high_dim_varying_lengths()
toy_tests.first_quarter_vs_sklearn()
toy_tests.rect_vs_diagonal()
toy_tests.rect_vs_diagonal_high_dim()
toy_tests.rect_vs_diagonal_vs_sklearn()
toy_tests.classify_mnist()
print('######## End tests ########') | 44.635514 | 138 | 0.530098 | 2,449 | 19,104 | 3.793793 | 0.073908 | 0.039931 | 0.028415 | 0.038747 | 0.813153 | 0.774082 | 0.740824 | 0.712948 | 0.712948 | 0.712948 | 0 | 0.027525 | 0.349613 | 19,104 | 428 | 139 | 44.635514 | 0.720241 | 0.003769 | 0 | 0.643478 | 0 | 0 | 0.069844 | 0.013033 | 0 | 0 | 0 | 0 | 0.026087 | 1 | 0.06087 | false | 0 | 0.034783 | 0.005797 | 0.121739 | 0.081159 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4da843b5f9f237725dd059cb97f1d64637975ee2 | 335 | py | Python | spacy_pattern_builder/__init__.py | cyclecycle/spacy-dependency-pattern-builder | 51a1eb9a2cbd56163103e0e903af585442f8f912 | [
"MIT"
] | 32 | 2019-11-05T00:19:20.000Z | 2021-04-28T09:08:53.000Z | spacy_pattern_builder/__init__.py | cyclecycle/spacy-dependency-pattern-builder | 51a1eb9a2cbd56163103e0e903af585442f8f912 | [
"MIT"
] | 1 | 2020-01-28T09:06:14.000Z | 2020-09-19T21:28:06.000Z | spacy_pattern_builder/__init__.py | cyclecycle/spacy-dependency-pattern-builder | 51a1eb9a2cbd56163103e0e903af585442f8f912 | [
"MIT"
] | 6 | 2020-01-27T10:21:40.000Z | 2022-02-21T18:44:31.000Z | from spacy_pattern_builder.build import build_dependency_pattern
import spacy_pattern_builder.util
import spacy_pattern_builder.exceptions
import spacy_pattern_builder.mutate
import spacy_pattern_builder.match
from spacy_pattern_builder.mutate import yield_pattern_permutations, yield_node_level_pattern_variants, yield_extended_trees
| 47.857143 | 124 | 0.922388 | 46 | 335 | 6.23913 | 0.391304 | 0.250871 | 0.397213 | 0.348432 | 0.216028 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.053731 | 335 | 6 | 125 | 55.833333 | 0.905363 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
4dcc8e62e2a397e4badab9215992f2905786e205 | 12,057 | py | Python | tests/zia/test_users.py | LetMeR00t/pyZscaler | 6b8027a4f76fdc1f95321558251a91d954218d9f | [
"MIT"
] | 16 | 2021-07-09T00:20:31.000Z | 2022-02-17T19:29:26.000Z | tests/zia/test_users.py | LetMeR00t/pyZscaler | 6b8027a4f76fdc1f95321558251a91d954218d9f | [
"MIT"
] | 62 | 2021-07-21T03:42:09.000Z | 2022-03-18T09:08:20.000Z | tests/zia/test_users.py | LetMeR00t/pyZscaler | 6b8027a4f76fdc1f95321558251a91d954218d9f | [
"MIT"
] | 8 | 2021-09-11T08:14:53.000Z | 2022-03-25T20:14:41.000Z | import pytest
import responses
from box import Box
from responses import matchers
from tests.conftest import stub_sleep
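# These tests stub the Zscaler API with the `responses` library: each test
# registers canned HTTP responses for zsapi.zscaler.net and then asserts on the
# Box/dict objects returned by the pyZscaler client.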
@pytest.fixture(name="users")
def fixture_users():
return [
{
"id": 1,
"name": "Test User A",
"email": "testusera@example.com",
"groups": {"id": 1, "name": "test"},
"department": {"id": 1, "name": "test_department"},
"comments": "Test",
"adminUser": False,
"isNonEditable": False,
"disabled": False,
"deleted": False,
},
{
"id": 2,
"name": "Test User B",
"email": "testuserb@example.com",
"groups": {"id": 1, "name": "test"},
"department": {"id": 1, "name": "test_department"},
"adminUser": True,
"isNonEditable": False,
"disabled": True,
"deleted": False,
},
]
@pytest.fixture(name="groups")
def fixture_groups():
return [
{"id": 1, "name": "Group A"},
{"id": 2, "name": "Group B"},
]
@pytest.fixture(name="departments")
def fixture_depts():
return [
{"id": 1, "name": "Dept A"},
{"id": 2, "name": "Dept B"},
]
@responses.activate
def test_users_add_user(zia, users):
responses.add(
method="POST",
url="https://zsapi.zscaler.net/api/v1/users",
json=users[0],
status=200,
match=[
matchers.json_params_matcher(
{
"name": "Test User A",
"email": "testusera@example.com",
"groups": {"id": "1"},
"department": {"id": "1"},
"comments": "Test",
}
)
],
)
resp = zia.users.add_user(
name="Test User A",
email="testusera@example.com",
groups={"id": "1"},
department={"id": "1"},
comments="Test",
)
assert isinstance(resp, dict)
assert resp.id == 1
assert resp.admin_user is False
assert resp.comments == "Test"
@responses.activate
def test_users_get_user_by_id(users, zia):
responses.add(
method="GET",
url="https://zsapi.zscaler.net/api/v1/users/1",
json=users[0],
status=200,
)
resp = zia.users.get_user("1")
assert isinstance(resp, dict)
assert resp.id == 1
@responses.activate
def test_users_get_user_by_email(users, zia):
responses.add(
method="GET",
url="https://zsapi.zscaler.net/api/v1/users?search=testuserb@example.com&page=1",
json=[users[1]],
status=200,
)
responses.add(
method="GET",
url="https://zsapi.zscaler.net/api/v1/users?search=testuserb@example.com&page=2",
json=[],
status=200,
)
resp = zia.users.get_user(email="testuserb@example.com")
assert isinstance(resp, Box)
assert resp.id == 2
@responses.activate
def test_users_get_user_error(zia):
    # Supplying both a user id and an email is ambiguous and should raise.
    with pytest.raises(Exception):
        zia.users.get_user("1", email="test@example.com")
@responses.activate
def test_users_update_user(zia, users):
updated_user = users[0]
updated_user["name"] = "Test User C"
updated_user["comments"] = "Updated Test"
responses.add(
responses.GET,
"https://zsapi.zscaler.net/api/v1/users/1",
json=users[0],
status=200,
)
responses.add(
responses.PUT,
url="https://zsapi.zscaler.net/api/v1/users/1",
json=updated_user,
match=[matchers.json_params_matcher(updated_user)],
)
resp = zia.users.update_user("1", name="Test User C", comments="Updated Test")
assert isinstance(resp, Box)
assert resp.name == updated_user["name"]
assert resp.comments == updated_user["comments"]
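# Pagination tests below register two 100-item pages per endpoint; the client
# is expected to stop fetching once max_pages or max_items is satisfied.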
@responses.activate
@stub_sleep
def test_list_users_with_one_page(zia, paginated_items):
items = paginated_items(200)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/users",
json=items[0:100],
status=200,
)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/users",
json=items[100:200],
status=200,
)
resp = zia.users.list_users(max_pages=1, page_size=100)
assert isinstance(resp, list)
assert resp[50].id == 50
assert len(resp) == 100
@responses.activate
@stub_sleep
def test_list_users_with_two_pages(zia, paginated_items):
items = paginated_items(200)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/users",
json=items[0:100],
status=200,
)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/users",
json=items[100:200],
status=200,
)
resp = zia.users.list_users(max_pages=2, page_size=100)
assert isinstance(resp, list)
assert resp[50].id == 50
assert resp[150].id == 150
assert len(resp) == 200
@responses.activate
@stub_sleep
def test_list_users_with_max_items_1(zia, paginated_items):
items = paginated_items(200)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/users",
json=items[0:100],
status=200,
)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/users",
json=items[100:200],
status=200,
)
resp = zia.users.list_users(max_items=1)
assert isinstance(resp, list)
assert len(resp) == 1
@responses.activate
@stub_sleep
def test_list_users_with_max_items_150(zia, paginated_items):
items = paginated_items(200)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/users",
json=items[0:100],
status=200,
)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/users",
json=items[100:200],
status=200,
)
resp = zia.users.list_users(max_items=150)
assert isinstance(resp, list)
assert len(resp) == 150
@responses.activate
@stub_sleep
def test_list_groups_with_one_page(zia, paginated_items):
items = paginated_items(200)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/groups",
json=items[0:100],
status=200,
)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/groups",
json=items[100:200],
status=200,
)
resp = zia.users.list_groups(max_pages=1, page_size=100)
assert isinstance(resp, list)
assert resp[50].id == 50
assert len(resp) == 100
@responses.activate
@stub_sleep
def test_list_groups_with_two_pages(zia, paginated_items):
items = paginated_items(200)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/groups",
json=items[0:100],
status=200,
)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/groups",
json=items[100:200],
status=200,
)
resp = zia.users.list_groups(max_pages=2, page_size=100)
assert isinstance(resp, list)
assert resp[50].id == 50
assert resp[150].id == 150
assert len(resp) == 200
@responses.activate
@stub_sleep
def test_list_groups_with_max_items_1(zia, paginated_items):
items = paginated_items(200)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/groups",
json=items[0:100],
status=200,
)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/groups",
json=items[100:200],
status=200,
)
resp = zia.users.list_groups(max_items=1)
assert isinstance(resp, list)
assert len(resp) == 1
@responses.activate
@stub_sleep
def test_list_groups_with_max_items_150(zia, paginated_items):
items = paginated_items(200)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/groups",
json=items[0:100],
status=200,
)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/groups",
json=items[100:200],
status=200,
)
resp = zia.users.list_groups(max_items=150)
assert isinstance(resp, list)
assert len(resp) == 150
@responses.activate
def test_users_get_group(zia, groups):
responses.add(
method="GET",
url="https://zsapi.zscaler.net/api/v1/groups/1",
json=groups[0],
status=200,
)
resp = zia.users.get_group("1")
assert isinstance(resp, dict)
assert resp.id == 1
@responses.activate
@stub_sleep
def test_list_departments_with_one_page(zia, paginated_items):
items = paginated_items(200)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/departments",
json=items[0:100],
status=200,
)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/departments",
json=items[100:200],
status=200,
)
resp = zia.users.list_departments(max_pages=1, page_size=100)
assert isinstance(resp, list)
assert resp[50].id == 50
assert len(resp) == 100
@responses.activate
@stub_sleep
def test_list_departments_with_two_pages(zia, paginated_items):
items = paginated_items(200)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/departments",
json=items[0:100],
status=200,
)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/departments",
json=items[100:200],
status=200,
)
resp = zia.users.list_departments(max_pages=2, page_size=100)
assert isinstance(resp, list)
assert resp[50].id == 50
assert resp[150].id == 150
assert len(resp) == 200
@responses.activate
@stub_sleep
def test_list_departments_with_max_items_1(zia, paginated_items):
items = paginated_items(200)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/departments",
json=items[0:100],
status=200,
)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/departments",
json=items[100:200],
status=200,
)
resp = zia.users.list_departments(max_items=1)
assert isinstance(resp, list)
assert len(resp) == 1
@responses.activate
@stub_sleep
def test_list_departments_with_max_items_150(zia, paginated_items):
items = paginated_items(200)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/departments",
json=items[0:100],
status=200,
)
responses.add(
responses.GET,
url="https://zsapi.zscaler.net/api/v1/departments",
json=items[100:200],
status=200,
)
resp = zia.users.list_departments(max_items=150)
assert isinstance(resp, list)
assert len(resp) == 150
@responses.activate
def test_users_get_department(zia, departments):
responses.add(
method="GET",
url="https://zsapi.zscaler.net/api/v1/departments/1",
json=departments[0],
status=200,
)
resp = zia.users.get_department("1")
assert isinstance(resp, dict)
assert resp.id == 1
@responses.activate
def test_users_delete_user(zia):
responses.add(method="DELETE", url="https://zsapi.zscaler.net/api/v1/users/1", status=204)
resp = zia.users.delete_user("1")
assert resp == 204
@responses.activate
def test_users_bulk_delete_users(zia):
user_ids = ["1", "2"]
responses.add(
responses.POST,
url="https://zsapi.zscaler.net/api/v1/users/bulkDelete",
status=204,
json={"ids": user_ids},
match=[matchers.json_params_matcher({"ids": user_ids})],
)
resp = zia.users.bulk_delete_users(["1", "2"])
assert isinstance(resp, dict)
assert resp.ids == ["1", "2"]
| 24.017928 | 94 | 0.601642 | 1,522 | 12,057 | 4.635348 | 0.063732 | 0.057831 | 0.081928 | 0.096386 | 0.829908 | 0.799717 | 0.782707 | 0.763855 | 0.751949 | 0.720907 | 0 | 0.051008 | 0.255287 | 12,057 | 501 | 95 | 24.065868 | 0.734714 | 0 | 0 | 0.628713 | 0 | 0.004951 | 0.170772 | 0.008709 | 0 | 0 | 0 | 0 | 0.126238 | 1 | 0.059406 | false | 0 | 0.012376 | 0.007426 | 0.079208 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4df8ad40249c20920d478e7850c6dcce05108f8e | 129 | py | Python | tests/helpers.py | raymundl/firepit | 5b913806eef646c02bd55e301b19baa052aa29d5 | [
"Apache-2.0"
] | null | null | null | tests/helpers.py | raymundl/firepit | 5b913806eef646c02bd55e301b19baa052aa29d5 | [
"Apache-2.0"
] | null | null | null | tests/helpers.py | raymundl/firepit | 5b913806eef646c02bd55e301b19baa052aa29d5 | [
"Apache-2.0"
] | null | null | null | from firepit.sqlitestorage import SQLiteStorage
def tmp_storage(tmpdir):
return SQLiteStorage(str(tmpdir.join('test.db')))
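# Typical pytest usage (hypothetical test; `tmpdir` is pytest's built-in fixture):
#   def test_store(tmpdir):
#       store = tmp_storage(tmpdir)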
| 21.5 | 53 | 0.782946 | 16 | 129 | 6.25 | 0.8125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108527 | 129 | 5 | 54 | 25.8 | 0.869565 | 0 | 0 | 0 | 0 | 0 | 0.054264 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
150744afb1de2f769b7f122732443fc1a432781b | 25 | py | Python | workspace/module/python-2.7/LxGui/.test/_test_re.py | no7hings/Lynxi | 43c745198a714c2e5aca86c6d7a014adeeb9abf7 | [
"MIT"
] | 2 | 2018-03-06T03:33:55.000Z | 2019-03-26T03:25:11.000Z | workspace/module/python-2.7/LxGui/.test/_test_re.py | no7hings/lynxi | 43c745198a714c2e5aca86c6d7a014adeeb9abf7 | [
"MIT"
] | null | null | null | workspace/module/python-2.7/LxGui/.test/_test_re.py | no7hings/lynxi | 43c745198a714c2e5aca86c6d7a014adeeb9abf7 | [
"MIT"
] | null | null | null | # coding:utf-8
import re
| 8.333333 | 14 | 0.72 | 5 | 25 | 3.6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047619 | 0.16 | 25 | 2 | 15 | 12.5 | 0.809524 | 0.48 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
128ba39e339fd04a97c20e529b68aac35b23fc9c | 32 | py | Python | klustaviewa/scripts/__init__.py | fiath/test | b50898dafa90e93da48f573e0b3feb1bb6acd8de | [
"MIT",
"BSD-3-Clause"
] | 20 | 2015-02-21T07:48:23.000Z | 2021-08-03T10:05:25.000Z | klustaviewa/scripts/__init__.py | fiath/test | b50898dafa90e93da48f573e0b3feb1bb6acd8de | [
"MIT",
"BSD-3-Clause"
] | 22 | 2015-02-10T17:59:01.000Z | 2020-07-15T09:12:47.000Z | klustaviewa/scripts/__init__.py | fiath/test | b50898dafa90e93da48f573e0b3feb1bb6acd8de | [
"MIT",
"BSD-3-Clause"
] | 10 | 2015-04-01T20:33:24.000Z | 2017-10-08T15:19:42.000Z | from runklustaviewa import main
| 16 | 31 | 0.875 | 4 | 32 | 7 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 32 | 1 | 32 | 32 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
12fa7d28a3c9bbafd0ab0f7300d3fea677404afd | 81 | py | Python | test/misc_test.py | sblack-usu/ulmo | 3213bf0302b44e77abdff1f3f66e7f1083571ce8 | [
"BSD-3-Clause"
] | 123 | 2015-01-29T12:35:52.000Z | 2021-12-15T21:09:33.000Z | test/misc_test.py | sblack-usu/ulmo | 3213bf0302b44e77abdff1f3f66e7f1083571ce8 | [
"BSD-3-Clause"
] | 107 | 2015-01-05T17:56:22.000Z | 2021-11-19T22:46:23.000Z | test/misc_test.py | sblack-usu/ulmo | 3213bf0302b44e77abdff1f3f66e7f1083571ce8 | [
"BSD-3-Clause"
] | 49 | 2015-02-15T18:11:34.000Z | 2022-01-25T14:25:32.000Z | import ulmo
def test_version_is_set():
assert hasattr(ulmo, '__version__')
| 13.5 | 39 | 0.740741 | 11 | 81 | 4.818182 | 0.818182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.160494 | 81 | 5 | 40 | 16.2 | 0.779412 | 0 | 0 | 0 | 0 | 0 | 0.135802 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
424900a4c6c4e7c8bb984d3d834b640ad25b9498 | 316 | py | Python | challenges/find_maximum_value_binary_tree/conftest.py | asakatida/data-structures-and-algorithms.py | 587d1a66a6c15a3c7d7786275608f065687e1810 | [
"MIT"
] | null | null | null | challenges/find_maximum_value_binary_tree/conftest.py | asakatida/data-structures-and-algorithms.py | 587d1a66a6c15a3c7d7786275608f065687e1810 | [
"MIT"
] | 2 | 2020-09-24T13:13:49.000Z | 2021-06-25T15:15:35.000Z | challenges/find_maximum_value_binary_tree/conftest.py | grandquista/data-structures-and-algorithms.py | 587d1a66a6c15a3c7d7786275608f065687e1810 | [
"MIT"
] | null | null | null | from data_structures.binary_search_tree.bst import BST
from pytest import fixture
@fixture
def new_bst():
return BST()
@fixture
def filled_bst():
return BST([4, 3, 2, 1, 8, 6, 12, 9])
@fixture
def left_bst():
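    # Strictly decreasing keys (9, 7, ..., -7) should yield a left-leaning
    # (degenerate) tree, assuming BST inserts keys sequentially.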
return BST(range(9, -9, -2))
@fixture
def right_bst():
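    # Strictly increasing keys (-9, -6, ..., 6) should yield a right-leaning
    # (degenerate) tree, assuming BST inserts keys sequentially.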
return BST(range(-9, 9, 3))
| 13.73913 | 54 | 0.655063 | 53 | 316 | 3.773585 | 0.471698 | 0.2 | 0.24 | 0.17 | 0.19 | 0.19 | 0 | 0 | 0 | 0 | 0 | 0.059289 | 0.199367 | 316 | 22 | 55 | 14.363636 | 0.731225 | 0 | 0 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.285714 | true | 0 | 0.142857 | 0.285714 | 0.714286 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 6 |
424f414f7d83c56e7f8f383bc00689fba7c49170 | 164 | py | Python | steward_fastapi/core/routers/__init__.py | AulonSal/steward-fastapi | 37772ff19852ec8ab80d715b74c5a25d59f65de2 | [
"Unlicense"
] | null | null | null | steward_fastapi/core/routers/__init__.py | AulonSal/steward-fastapi | 37772ff19852ec8ab80d715b74c5a25d59f65de2 | [
"Unlicense"
] | null | null | null | steward_fastapi/core/routers/__init__.py | AulonSal/steward-fastapi | 37772ff19852ec8ab80d715b74c5a25d59f65de2 | [
"Unlicense"
] | null | null | null | from .authentication import router as authentication_router
from .content import router as content_router
from .flexible_data import router as flexible_data_router
| 41 | 59 | 0.871951 | 23 | 164 | 6 | 0.347826 | 0.26087 | 0.304348 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.109756 | 164 | 3 | 60 | 54.666667 | 0.945205 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
425c3070d8d4448972d6b591894d6314aff6aced | 154 | py | Python | mnn_core/nn/norm_modules/__init__.py | Acturos/moment_neural_network | c5074b14970f16a007ba5091b07127c4645cd50e | [
"MIT"
] | 1 | 2021-03-02T07:39:53.000Z | 2021-03-02T07:39:53.000Z | mnn_core/nn/norm_modules/__init__.py | Acturos/moment_neural_network | c5074b14970f16a007ba5091b07127c4645cd50e | [
"MIT"
] | null | null | null | mnn_core/nn/norm_modules/__init__.py | Acturos/moment_neural_network | c5074b14970f16a007ba5091b07127c4645cd50e | [
"MIT"
] | 2 | 2021-03-02T07:40:01.000Z | 2021-03-02T09:14:51.000Z | from .covariance_norm import CovarianceNorm
from .batch_norm import BatchNorm1dNoRho, BatchNorm1dDuo
from .layer_norm import LayerNormDuo, LayerNormNoRho
| 38.5 | 56 | 0.876623 | 17 | 154 | 7.764706 | 0.647059 | 0.227273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014286 | 0.090909 | 154 | 3 | 57 | 51.333333 | 0.928571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
426f3e36e3dc3743ef409b91e9ca26b016b1d1c4 | 27 | py | Python | netbox/__init__.py | bandwidth-intern/python-netbox | da897b2663c9105c4b42f404ef80985ae0a3a146 | [
"Apache-2.0"
] | 37 | 2017-10-30T10:31:36.000Z | 2022-01-09T17:36:27.000Z | netbox/__init__.py | bandwidth-intern/python-netbox | da897b2663c9105c4b42f404ef80985ae0a3a146 | [
"Apache-2.0"
] | 42 | 2018-03-09T16:25:20.000Z | 2022-01-27T08:26:50.000Z | netbox/__init__.py | bandwidth-intern/python-netbox | da897b2663c9105c4b42f404ef80985ae0a3a146 | [
"Apache-2.0"
] | 38 | 2018-03-09T15:42:23.000Z | 2022-03-30T06:31:17.000Z | from .netbox import NetBox
| 13.5 | 26 | 0.814815 | 4 | 27 | 5.5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148148 | 27 | 1 | 27 | 27 | 0.956522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
42b13fc13de58445ac14b4f92f3347b67e1a690d | 185 | py | Python | pyratelimit/__init__.py | ascheucher/pyratelimit | af816e11436fdc4c5ba6cefbf08c1fb0f8e89bc5 | [
"Apache-2.0"
] | null | null | null | pyratelimit/__init__.py | ascheucher/pyratelimit | af816e11436fdc4c5ba6cefbf08c1fb0f8e89bc5 | [
"Apache-2.0"
] | null | null | null | pyratelimit/__init__.py | ascheucher/pyratelimit | af816e11436fdc4c5ba6cefbf08c1fb0f8e89bc5 | [
"Apache-2.0"
] | null | null | null | from pyratelimit.pyratelimit import PyRateLimit
from pyratelimit.pyratelimit_exception import PyRateLimitException
from pyratelimit.redis_helper import RedisHelper
name = "pyratelimit" | 37 | 66 | 0.886486 | 19 | 185 | 8.526316 | 0.473684 | 0.277778 | 0.320988 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.081081 | 185 | 5 | 67 | 37 | 0.952941 | 0 | 0 | 0 | 0 | 0 | 0.05914 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.75 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c44320a096b758fcaa8e546e8ed32365377ea708 | 144 | py | Python | WeatherAugmenter/GeoWeatherExceptions.py | kaumil/cmpt_732 | e753824d30fdc32c60f6699ac5b4d88d78f6fa33 | [
"MIT"
] | 1 | 2021-11-19T23:41:46.000Z | 2021-11-19T23:41:46.000Z | WeatherAugmenter/GeoWeatherExceptions.py | kaumil/cmpt_732 | e753824d30fdc32c60f6699ac5b4d88d78f6fa33 | [
"MIT"
] | null | null | null | WeatherAugmenter/GeoWeatherExceptions.py | kaumil/cmpt_732 | e753824d30fdc32c60f6699ac5b4d88d78f6fa33 | [
"MIT"
] | 1 | 2021-11-11T16:51:07.000Z | 2021-11-11T16:51:07.000Z | class GeoWeatherServiceFailedToLocateException(Exception):
pass
class GeoWeatherServiceFailedToRetrieveException(Exception):
pass
| 24 | 61 | 0.819444 | 8 | 144 | 14.75 | 0.625 | 0.220339 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.138889 | 144 | 5 | 62 | 28.8 | 0.951613 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.5 | 0 | 0 | 0.5 | 0 | 1 | 0 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 6 |
c44f5f8a5c174a6992a4d75bf1fe9acfa2ef06dd | 136 | py | Python | nnet/__init__.py | trip2eee/nnet | 08c435a7b40aa0b41eb64875b39d3705cf9cffdd | [
"MIT"
] | 3 | 2021-12-31T10:59:54.000Z | 2022-01-14T11:17:28.000Z | nnet/__init__.py | trip2eee/nnet | 08c435a7b40aa0b41eb64875b39d3705cf9cffdd | [
"MIT"
] | null | null | null | nnet/__init__.py | trip2eee/nnet | 08c435a7b40aa0b41eb64875b39d3705cf9cffdd | [
"MIT"
] | null | null | null | from nnet.dataset import Dataset
from nnet.dataloader import DataLoader
from nnet.module import Module
import nnet.loss
import nnet.nn
| 19.428571 | 38 | 0.838235 | 21 | 136 | 5.428571 | 0.380952 | 0.210526 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125 | 136 | 6 | 39 | 22.666667 | 0.957983 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
c45660d769a261b7a5ab9247c2c8fa6feac4aab7 | 9,105 | py | Python | pandapower/test/opf/test_costs_pwl.py | hmaschke/pandapower-1 | 2e93969050d3d468ce57f73d358e97fabc6e5141 | [
"BSD-3-Clause"
] | 2 | 2019-11-01T11:01:41.000Z | 2022-02-07T12:55:55.000Z | pandapower/test/opf/test_costs_pwl.py | hmaschke/pandapower-1 | 2e93969050d3d468ce57f73d358e97fabc6e5141 | [
"BSD-3-Clause"
] | null | null | null | pandapower/test/opf/test_costs_pwl.py | hmaschke/pandapower-1 | 2e93969050d3d468ce57f73d358e97fabc6e5141 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2022 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
import pytest
import pandapower as pp
try:
import pandaplan.core.pplog as logging
except ImportError:
import logging
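# Each test below builds a minimal two-bus network, attaches a piecewise-linear
# cost to one element, runs the OPF, and checks that net.res_cost matches the
# cost function evaluated at the dispatched power.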
def test_cost_piecewise_linear_gen():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_gen(net, 1, p_mw=0.1, controllable=True, min_p_mw=0.05, max_p_mw=0.15, max_q_mvar=0.05,
min_q_mvar=-0.05)
pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_mw=0.02, controllable=False)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_pwl_cost(net, 0, "gen", [[0, 75, 1.5], [75, 150, 1.5]])
pp.runopp(net)
assert net["OPF_converged"]
assert np.isclose(net.res_cost, net.res_gen.p_mw.values * 1.5, atol=1e-3)
def test_cost_piecewise_linear_eg():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10)
pp.create_ext_grid(net, 0, min_p_mw=0, max_p_mw=0.050)
pp.create_gen(net, 1, p_mw=0.01, min_p_mw=0, max_p_mw=0.050, controllable=True)
# pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_mw=0.02, controllable=False)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_pwl_cost(net, 0, "ext_grid", [[0, 50, -10]])
# run OPF
pp.runopp(net)
assert net["OPF_converged"]
assert np.isclose(net.res_cost, -10*net.res_ext_grid.p_mw.values)
def test_get_costs():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_gen(net, 1, p_mw=0.1, controllable=True, min_p_mw=0.05, max_p_mw=0.15, max_q_mvar=0.05,
min_q_mvar=-0.05)
pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_mw=0.02, controllable=False)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_pwl_cost(net, 0, "gen", [[0, 150, 2]])
# run OPF
pp.runopp(net)
assert net["OPF_converged"]
assert net.res_gen.p_mw.values[0] - net.gen.min_p_mw.values[0] < 1e-2
assert np.isclose(net.res_cost, 2 * net.res_gen.p_mw.values[0])
# check and assert result
def test_cost_piecewise_linear_sgen():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_sgen(net, 1, p_mw=0.1, controllable=True, min_p_mw=0.05, max_p_mw=0.15, max_q_mvar=0.05,
min_q_mvar=-0.05)
pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_mw=0.02, controllable=False)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_pwl_cost(net, 0, "sgen", [[0, 150, 2]])
# run OPF
pp.runopp(net)
assert net["OPF_converged"]
assert net.res_sgen.p_mw.values[0] - net.sgen.min_p_mw.values[0] < 1e-2
assert np.isclose(net.res_cost, 2 * net.res_sgen.p_mw.values[0])
def test_cost_piecewise_linear_load():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_load(net, 1, p_mw=0.1, controllable=True, max_p_mw=0.15, min_p_mw=0.050, max_q_mvar=0,
min_q_mvar=0)
pp.create_ext_grid(net, 0)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_pwl_cost(net, 0, "load", [[0, 75, 1.5], [75, 150, 1.5]])
pp.runopp(net)
assert net["OPF_converged"]
assert abs(net.res_cost - net.res_load.p_mw.values * 1.5) < 1e-3
def test_cost_piecewise_linear_sgen_uneven_slopes():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_sgen(net, 1, p_mw=0.1, controllable=True, min_p_mw=0.05, max_p_mw=0.15, max_q_mvar=0.05,
min_q_mvar=-0.05)
pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_mw=0.02, controllable=False)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_pwl_cost(net, 0, "sgen", [[0, 75, 1.5], [75, 150, 1.5]])
# run OPF
pp.runopp(net)
assert net["OPF_converged"]
assert net.res_cost - net.res_sgen.p_mw.values * 1.5 < 1e-3
def test_cost_piecewise_linear_load_uneven_slopes():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_load(net, 1, p_mw=0.050)
pp.create_ext_grid(net, 0)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_pwl_cost(net, 0, "ext_grid", [(0, 0.075, 1), (0.075, 150, 2)])
pp.runopp(net)
assert net["OPF_converged"]
assert np.isclose(net.res_cost, net.res_ext_grid.p_mw.values[0])
net.load.p_mw = 0.1
pp.runopp(net)
assert np.isclose(net.res_cost, (0.075 + 2*(net.res_ext_grid.p_mw.values[0] - 0.075)), rtol=1e-2)
def test_cost_piecewise_linear_sgen_very_unsteady_slopes():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.5
vm_min = 0.5
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_sgen(net, 1, p_mw=0.10, controllable=True, min_p_mw=0, max_p_mw=1.50,
max_q_mvar=0.05, min_q_mvar=-0.05)
pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_mw=0.02, controllable=False)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_pwl_cost(net, 0, "sgen", [[0, 0.75, -1], [0.75, 1500, 2]])
# run OPF
pp.runopp(net)
assert net["OPF_converged"]
assert np.isclose(net.res_sgen.p_mw.values[0], .75, rtol=1e-2)
assert np.isclose(net.res_sgen.p_mw.values[0], -net.res_cost, rtol=1e-2)
if __name__ == "__main__":
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")
pytest.main(["-xs"])
| 36.71371 | 102 | 0.642394 | 1,644 | 9,105 | 3.25 | 0.086375 | 0.094329 | 0.035935 | 0.041924 | 0.899308 | 0.887516 | 0.854763 | 0.834737 | 0.814711 | 0.808347 | 0 | 0.085539 | 0.228336 | 9,105 | 247 | 103 | 36.862348 | 0.674922 | 0.121252 | 0 | 0.66443 | 0 | 0 | 0.025079 | 0 | 0 | 0 | 0 | 0 | 0.134228 | 1 | 0.053691 | false | 0 | 0.040268 | 0 | 0.09396 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c4578227e73db118c621e24f1e455550c96d33b8 | 123 | py | Python | demo/hello_file.py | peitur/demo_python | 9e21f15d8bc7345637eda1f7b457a4847ee2cedf | [
"Apache-2.0"
] | 1 | 2020-09-28T17:05:41.000Z | 2020-09-28T17:05:41.000Z | demo/hello_file.py | peitur/demo_python | 9e21f15d8bc7345637eda1f7b457a4847ee2cedf | [
"Apache-2.0"
] | null | null | null | demo/hello_file.py | peitur/demo_python | 9e21f15d8bc7345637eda1f7b457a4847ee2cedf | [
"Apache-2.0"
] | 1 | 2020-05-05T07:31:16.000Z | 2020-05-05T07:31:16.000Z | #!/usr/bin/env python3
import os, sys, re
import datetime
from pprint import pprint
if __name__ == "__main__":
pass
| 12.3 | 26 | 0.707317 | 18 | 123 | 4.388889 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010101 | 0.195122 | 123 | 9 | 27 | 13.666667 | 0.787879 | 0.170732 | 0 | 0 | 0 | 0 | 0.079208 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.2 | 0.6 | 0 | 0.6 | 0.2 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
c470d6dd3f9956562681c9e8f5c3c48d889d42a5 | 146 | py | Python | src/exco/shortcut.py | thegangtechnology/excel_comment_orm | b38156b406ccb3ce87737b8ed049bbf3b8a39050 | [
"MIT"
] | 2 | 2020-11-10T04:53:07.000Z | 2020-11-12T03:53:46.000Z | src/exco/shortcut.py | thegangtechnology/excel_comment_orm | b38156b406ccb3ce87737b8ed049bbf3b8a39050 | [
"MIT"
] | 50 | 2020-11-09T06:30:31.000Z | 2022-01-06T05:00:50.000Z | src/exco/shortcut.py | thegangtechnology/excel_comment_orm | b38156b406ccb3ce87737b8ed049bbf3b8a39050 | [
"MIT"
] | null | null | null | from exco import ExcelProcessorFactory
def from_excel(fname: str):
return ExcelProcessorFactory.default().create_from_template_excel(fname)
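# Hypothetical usage sketch (file name is illustrative only):
#   processor = from_excel('template.xlsx')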
| 24.333333 | 76 | 0.828767 | 17 | 146 | 6.882353 | 0.705882 | 0.17094 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.10274 | 146 | 5 | 77 | 29.2 | 0.89313 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 6 |
670772f60d60f5594324cedd7a4f08f911848fe5 | 25 | py | Python | wimbledon/vis/__init__.py | alan-turing-institute/WimbledonPlanner | ff73f2a52425d7855ebf224f6acc59fa99ff664b | [
"MIT"
] | 1 | 2020-07-14T16:55:18.000Z | 2020-07-14T16:55:18.000Z | wimbledon/vis/__init__.py | alan-turing-institute/WimbledonPlanner | ff73f2a52425d7855ebf224f6acc59fa99ff664b | [
"MIT"
] | 29 | 2019-10-15T11:35:47.000Z | 2022-03-21T12:10:55.000Z | wimbledon/vis/__init__.py | alan-turing-institute/WimbledonPlanner | ff73f2a52425d7855ebf224f6acc59fa99ff664b | [
"MIT"
] | null | null | null | from .Visualise import *
| 12.5 | 24 | 0.76 | 3 | 25 | 6.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16 | 25 | 1 | 25 | 25 | 0.904762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
674c5591df9df956330f475d960d9d3b3f01e7e5 | 2,736 | py | Python | benchmarks/fill_dense.py | 447983454/taichi | 2bfbca88b2d8cb1a070da9a40c5422c99b23fc2f | [
"MIT"
] | 1 | 2020-11-01T18:21:00.000Z | 2020-11-01T18:21:00.000Z | benchmarks/fill_dense.py | 447983454/taichi | 2bfbca88b2d8cb1a070da9a40c5422c99b23fc2f | [
"MIT"
] | null | null | null | benchmarks/fill_dense.py | 447983454/taichi | 2bfbca88b2d8cb1a070da9a40c5422c99b23fc2f | [
"MIT"
] | null | null | null | import taichi as ti
# originally by @KLozes
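# Each benchmark below fills an f32 field of ~16M elements and times it via
# ti.benchmark; the variants differ only in the field layout (flat vs. 8x8 or
# 16x16 blocks) and in how the fill loop is expressed (struct-for, range-for,
# with or without list generation).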
@ti.all_archs
def benchmark_flat_struct():
N = 4096
a = ti.var(dt=ti.f32, shape=(N, N))
@ti.kernel
def fill():
for i, j in a:
a[i, j] = 2.0
return ti.benchmark(fill, repeat=500)
@ti.all_archs
def benchmark_flat_range():
N = 4096
a = ti.var(dt=ti.f32, shape=(N, N))
@ti.kernel
def fill():
for i, j in ti.ndrange(N, N):
a[i, j] = 2.0
return ti.benchmark(fill, repeat=700)
@ti.all_archs
def benchmark_nested_struct():
a = ti.var(dt=ti.f32)
N = 512
ti.root.dense(ti.ij, [N, N]).dense(ti.ij, [8, 8]).place(a)
@ti.kernel
def fill():
for i, j in a:
a[i, j] = 2.0
return ti.benchmark(fill, repeat=700)
@ti.all_archs
def benchmark_nested_struct_listgen_8x8():
a = ti.var(dt=ti.f32)
ti.cfg.demote_dense_struct_fors = False
N = 512
ti.root.dense(ti.ij, [N, N]).dense(ti.ij, [8, 8]).place(a)
@ti.kernel
def fill():
for i, j in a:
a[i, j] = 2.0
return ti.benchmark(fill, repeat=1000)
@ti.all_archs
def benchmark_nested_struct_listgen_16x16():
a = ti.var(dt=ti.f32)
ti.cfg.demote_dense_struct_fors = False
N = 256
ti.root.dense(ti.ij, [N, N]).dense(ti.ij, [16, 16]).place(a)
@ti.kernel
def fill():
for i, j in a:
a[i, j] = 2.0
return ti.benchmark(fill, repeat=700)
@ti.all_archs
def benchmark_nested_range_blocked():
a = ti.var(dt=ti.f32)
N = 512
ti.root.dense(ti.ij, [N, N]).dense(ti.ij, [8, 8]).place(a)
@ti.kernel
def fill():
for X in range(N * N):
for Y in range(64):
a[X // N * 8 + Y // 8, X % N * 8 + Y % 8] = 2.0
return ti.benchmark(fill, repeat=800)
@ti.all_archs
def benchmark_nested_range():
a = ti.var(dt=ti.f32)
N = 512
ti.root.dense(ti.ij, [N, N]).dense(ti.ij, [8, 8]).place(a)
@ti.kernel
def fill():
for j in range(N * 8):
for i in range(N * 8):
a[i, j] = 2.0
return ti.benchmark(fill, repeat=1000)
@ti.all_archs
def benchmark_root_listgen():
a = ti.var(dt=ti.f32)
ti.cfg.demote_dense_struct_fors = False
N = 512
ti.root.dense(ti.ij, [N, N]).dense(ti.ij, [8, 8]).place(a)
@ti.kernel
def fill():
for i, j in a.parent():
a[i, j] = 2.0
return ti.benchmark(fill, repeat=800)
'''
# ti.cfg.arch = ti.cuda
# ti.cfg.print_kernel_llvm_ir_optimized = True
# ti.cfg.print_kernel_llvm_ir = True
ti.cfg.kernel_profiler = True
# ti.cfg.verbose_kernel_launches = True
print(benchmark_nested_struct_listgen_8x8())
# print(benchmark_root_listgen())
ti.kernel_profiler_print()
'''
| 19.970803 | 64 | 0.574196 | 473 | 2,736 | 3.20296 | 0.141649 | 0.027723 | 0.071287 | 0.068647 | 0.817822 | 0.788779 | 0.725413 | 0.70165 | 0.684488 | 0.658746 | 0 | 0.056716 | 0.265351 | 2,736 | 136 | 65 | 20.117647 | 0.697015 | 0.007675 | 0 | 0.77381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.190476 | false | 0 | 0.011905 | 0 | 0.297619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
675321d031d6b8b31677a1a09299c38290f08480 | 25 | py | Python | heat/graph/__init__.py | sebimarkgraf/heat | 9638e384f52c9bade75590963b9d57e080692da4 | [
"MIT"
] | null | null | null | heat/graph/__init__.py | sebimarkgraf/heat | 9638e384f52c9bade75590963b9d57e080692da4 | [
"MIT"
] | 1 | 2020-07-29T08:01:09.000Z | 2020-07-29T08:10:41.000Z | heat/graph/__init__.py | sebimarkgraf/heat | 9638e384f52c9bade75590963b9d57e080692da4 | [
"MIT"
] | null | null | null | from .laplacian import *
| 12.5 | 24 | 0.76 | 3 | 25 | 6.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16 | 25 | 1 | 25 | 25 | 0.904762 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
675613d18dd4d809f31f81a336098e9a096375d0 | 257 | py | Python | move_base_msgs/msg/__init__.py | florent-lamiraux/fake-ros | e3c258fe029c8e68feeef0986091336fffda11ce | [
"BSD-3-Clause"
] | null | null | null | move_base_msgs/msg/__init__.py | florent-lamiraux/fake-ros | e3c258fe029c8e68feeef0986091336fffda11ce | [
"BSD-3-Clause"
] | null | null | null | move_base_msgs/msg/__init__.py | florent-lamiraux/fake-ros | e3c258fe029c8e68feeef0986091336fffda11ce | [
"BSD-3-Clause"
] | null | null | null | class MoveBaseAction:
def __init__(self):
self.action_goal = MoveBaseGoal()
self.action_result = MoveBaseResult()
class MoveBaseGoal:
def __init__(self):
pass
class MoveBaseResult:
def __init__(self):
pass
| 18.357143 | 45 | 0.645914 | 25 | 257 | 6.08 | 0.44 | 0.138158 | 0.217105 | 0.197368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.276265 | 257 | 13 | 46 | 19.769231 | 0.817204 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.3 | false | 0.2 | 0 | 0 | 0.6 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 6 |
6796d2d50b7bbdf834e419992a3bd7d0b3b0ecc7 | 40 | py | Python | ceefax/fonts/size7condensed/__init__.py | mscroggs/CEEFAX | 8e7a075de1809064b77360da24ebbbaa409c3bf2 | [
"MIT"
] | 1 | 2020-03-28T15:53:22.000Z | 2020-03-28T15:53:22.000Z | ceefax/fonts/size7condensed/__init__.py | mscroggs/CEEFAX | 8e7a075de1809064b77360da24ebbbaa409c3bf2 | [
"MIT"
] | 1 | 2021-02-05T13:43:52.000Z | 2021-02-05T13:43:52.000Z | ceefax/fonts/size7condensed/__init__.py | mscroggs/CEEFAX | 8e7a075de1809064b77360da24ebbbaa409c3bf2 | [
"MIT"
] | null | null | null | from .default import size7condensedfont
| 20 | 39 | 0.875 | 4 | 40 | 8.75 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027778 | 0.1 | 40 | 1 | 40 | 40 | 0.944444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6 |
679b6afd37fb007e58fa9a482b3f5ed8357e3eb9 | 105,451 | py | Python | tensorflow_federated/python/core/impl/compiler/compiled_computation_transforms_test.py | alessiomora/federated | 3b501067ed7062aaec3cc8830aaec0a7cf8f0942 | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/core/impl/compiler/compiled_computation_transforms_test.py | alessiomora/federated | 3b501067ed7062aaec3cc8830aaec0a7cf8f0942 | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/core/impl/compiler/compiled_computation_transforms_test.py | alessiomora/federated | 3b501067ed7062aaec3cc8830aaec0a7cf8f0942 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_federated.proto.v0 import computation_pb2 as pb
from tensorflow_federated.python.common_libs import serialization_utils
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.api import test_case
from tensorflow_federated.python.core.impl.compiler import building_block_factory
from tensorflow_federated.python.core.impl.compiler import building_blocks
from tensorflow_federated.python.core.impl.compiler import compiled_computation_transforms
from tensorflow_federated.python.core.impl.compiler import tensorflow_computation_factory
from tensorflow_federated.python.core.impl.compiler import tensorflow_computation_transformations
from tensorflow_federated.python.core.impl.compiler import test_utils as compiler_test_utils
from tensorflow_federated.python.core.impl.compiler import tree_analysis
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import type_serialization
from tensorflow_federated.python.core.impl.utils import tensorflow_utils
def _create_compiled_computation(py_fn, parameter_type):
  proto, type_signature = tensorflow_computation_factory.create_computation_for_py_fn(
      py_fn, parameter_type)
  return building_blocks.CompiledComputation(
      proto, type_signature=type_signature)
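# For example (illustrative only), a compiled computation that doubles a
# float32 tensor could be built as:
#   double = _create_compiled_computation(
#       lambda x: x * 2.0, computation_types.TensorType(tf.float32))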
class CompiledComputationTransformsTest(test_case.TestCase,
                                        parameterized.TestCase):

  def test_select_graph_output_with_none_comp_raises_type_error(self):
    with self.assertRaises(TypeError):
      compiled_computation_transforms.select_graph_output(None, index=0)

  def test_select_graph_output_with_no_selection_raises_value_error(self):
    computation_arg_type = computation_types.StructType([('a', tf.int32),
                                                         ('b', tf.float32)])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    with self.assertRaises(ValueError):
      compiled_computation_transforms.select_graph_output(foo)

  def test_select_graph_output_with_wrong_return_type_raises_type_error(self):
    computation_arg_type = computation_types.TensorType(tf.int32)
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    with self.assertRaises(TypeError):
      compiled_computation_transforms.select_graph_output(foo, index=0)

  def test_select_graph_output_by_name_bad_name_raises_value_error(self):
    computation_arg_type = computation_types.StructType([('a', tf.int32),
                                                         ('b', tf.float32)])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    with self.assertRaises(KeyError):
      compiled_computation_transforms.select_graph_output(foo, name='x')

  def test_select_graph_output_by_index_single_level_of_nesting(self):
    computation_arg_type = computation_types.StructType([tf.int32, tf.float32])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    first_element_selected = compiled_computation_transforms.select_graph_output(
        foo, index=0)
    second_element_selected = compiled_computation_transforms.select_graph_output(
        foo, index=1)
    self.assertEqual(first_element_selected.type_signature.result,
                     foo.type_signature.result[0])
    self.assertEqual(foo.proto.tensorflow.parameter,
                     first_element_selected.proto.tensorflow.parameter)
    self.assertEqual(foo.proto.tensorflow.initialize_op,
                     first_element_selected.proto.tensorflow.initialize_op)
    self.assertEqual(foo.proto.tensorflow.result.struct.element[0].tensor,
                     first_element_selected.proto.tensorflow.result.tensor)
    self.assertEqual(second_element_selected.type_signature.result,
                     foo.type_signature.result[1])
    self.assertEqual(foo.proto.tensorflow.parameter,
                     second_element_selected.proto.tensorflow.parameter)
    self.assertEqual(foo.proto.tensorflow.initialize_op,
                     second_element_selected.proto.tensorflow.initialize_op)
    self.assertEqual(foo.proto.tensorflow.result.struct.element[1].tensor,
                     second_element_selected.proto.tensorflow.result.tensor)

  def test_select_graph_output_by_name_single_level_of_nesting(self):
    computation_arg_type = computation_types.StructType([('a', tf.int32),
                                                         ('b', tf.float32)])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    first_element_selected = compiled_computation_transforms.select_graph_output(
        foo, name='a')
    self.assertEqual(first_element_selected.type_signature.result,
                     computation_types.TensorType(tf.int32))
    second_element_selected = compiled_computation_transforms.select_graph_output(
        foo, name='b')
    self.assertEqual(second_element_selected.type_signature.result,
                     computation_types.TensorType(tf.float32))
    self.assertEqual(foo.proto.tensorflow.parameter,
                     first_element_selected.proto.tensorflow.parameter)
    self.assertEqual(foo.proto.tensorflow.initialize_op,
                     first_element_selected.proto.tensorflow.initialize_op)
    self.assertEqual(foo.proto.tensorflow.result.struct.element[0].tensor,
                     first_element_selected.proto.tensorflow.result.tensor)
    self.assertEqual(second_element_selected.type_signature.result,
                     foo.type_signature.result[1])
    self.assertEqual(foo.proto.tensorflow.parameter,
                     second_element_selected.proto.tensorflow.parameter)
    self.assertEqual(foo.proto.tensorflow.initialize_op,
                     second_element_selected.proto.tensorflow.initialize_op)
    self.assertEqual(foo.proto.tensorflow.result.struct.element[1].tensor,
                     second_element_selected.proto.tensorflow.result.tensor)

  def test_select_graph_output_by_index_two_nested_levels_keeps_nested_type(
      self):
    nested_type1 = computation_types.StructType([('a', tf.int32),
                                                 ('b', tf.float32)])
    nested_type2 = computation_types.StructType([('c', tf.int32),
                                                 ('d', tf.float32)])
    computation_arg_type = computation_types.StructType([('x', nested_type1),
                                                         ('y', nested_type2)])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    first_element_selected = compiled_computation_transforms.select_graph_output(
        foo, index=0)
    self.assertEqual(first_element_selected.type_signature.result, nested_type1)
    second_element_selected = compiled_computation_transforms.select_graph_output(
        foo, index=1)
    self.assertEqual(second_element_selected.type_signature.result,
                     nested_type2)
    self.assertEqual(foo.proto.tensorflow.parameter,
                     first_element_selected.proto.tensorflow.parameter)
    self.assertEqual(foo.proto.tensorflow.initialize_op,
                     first_element_selected.proto.tensorflow.initialize_op)
    self.assertEqual(foo.proto.tensorflow.result.struct.element[0].struct,
                     first_element_selected.proto.tensorflow.result.struct)
    self.assertEqual(second_element_selected.type_signature.result,
                     foo.type_signature.result[1])
    self.assertEqual(foo.proto.tensorflow.parameter,
                     second_element_selected.proto.tensorflow.parameter)
    self.assertEqual(foo.proto.tensorflow.initialize_op,
                     second_element_selected.proto.tensorflow.initialize_op)
    self.assertEqual(foo.proto.tensorflow.result.struct.element[1].struct,
                     second_element_selected.proto.tensorflow.result.struct)

  def test_select_graph_output_by_name_two_nested_levels_keeps_nested_type(
      self):
    nested_type1 = computation_types.StructType([('a', tf.int32),
                                                 ('b', tf.float32)])
    nested_type2 = computation_types.StructType([('c', tf.int32),
                                                 ('d', tf.float32)])
    computation_arg_type = computation_types.StructType([('x', nested_type1),
                                                         ('y', nested_type2)])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    first_element_selected = compiled_computation_transforms.select_graph_output(
        foo, name='x')
    self.assertEqual(first_element_selected.type_signature.result, nested_type1)
    second_element_selected = compiled_computation_transforms.select_graph_output(
        foo, name='y')
    self.assertEqual(second_element_selected.type_signature.result,
                     nested_type2)
    self.assertEqual(foo.proto.tensorflow.parameter,
                     first_element_selected.proto.tensorflow.parameter)
    self.assertEqual(foo.proto.tensorflow.initialize_op,
                     first_element_selected.proto.tensorflow.initialize_op)
    self.assertEqual(foo.proto.tensorflow.result.struct.element[0].struct,
                     first_element_selected.proto.tensorflow.result.struct)
    self.assertEqual(second_element_selected.type_signature.result,
                     foo.type_signature.result[1])
    self.assertEqual(foo.proto.tensorflow.parameter,
                     second_element_selected.proto.tensorflow.parameter)
    self.assertEqual(foo.proto.tensorflow.initialize_op,
                     second_element_selected.proto.tensorflow.initialize_op)
    self.assertEqual(foo.proto.tensorflow.result.struct.element[1].struct,
                     second_element_selected.proto.tensorflow.result.struct)

  def test_permute_graph_inputs_with_none_comp_raises_type_error(self):
    with self.assertRaises(TypeError):
      compiled_computation_transforms.permute_graph_inputs(None, [0])

  def test_permute_graph_inputs_with_integer_map_raises_type_error(self):
    computation_arg_type = computation_types.StructType([('a', tf.int32)])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    with self.assertRaises(TypeError):
      compiled_computation_transforms.permute_graph_inputs(foo, 0)

  def test_permute_graph_inputs_with_list_of_strings_raises_type_error(self):
    computation_arg_type = computation_types.StructType([('a', tf.int32)])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    with self.assertRaises(TypeError):
      compiled_computation_transforms.permute_graph_inputs(foo, ['a'])

  def test_permute_graph_inputs_wrong_permutation_length_raises_value_error(
      self):
    computation_arg_type = computation_types.StructType([tf.int32, tf.float32])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    with self.assertRaises(ValueError):
      compiled_computation_transforms.permute_graph_inputs(foo, [0])

  def test_permute_graph_inputs_repeated_indices_raises_value_error(self):
    computation_arg_type = computation_types.StructType([tf.int32, tf.float32])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    with self.assertRaises(ValueError):
      compiled_computation_transforms.permute_graph_inputs(foo, [0, 0])

  def test_permute_graph_inputs_large_index_raises_value_error(self):
    computation_arg_type = computation_types.StructType([tf.int32, tf.float32])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    with self.assertRaises(ValueError):
      compiled_computation_transforms.permute_graph_inputs(foo, [0, 2])

  def test_permute_graph_inputs_negative_index_raises_value_error(self):
    computation_arg_type = computation_types.StructType([tf.int32, tf.float32])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    with self.assertRaises(ValueError):
      compiled_computation_transforms.permute_graph_inputs(foo, [0, -1])

  def test_permute_graph_inputs_identity_permutation_noops(self):
    computation_arg_type = computation_types.StructType([tf.int32, tf.float32])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    mapped_to_identity = compiled_computation_transforms.permute_graph_inputs(
        foo, [0, 1])
    self.assertEqual(mapped_to_identity.proto.tensorflow.parameter,
                     foo.proto.tensorflow.parameter)
    self.assertEqual(mapped_to_identity.proto.tensorflow.result,
                     foo.proto.tensorflow.result)
    self.assertEqual(mapped_to_identity.proto.tensorflow.initialize_op,
                     foo.proto.tensorflow.initialize_op)
    foo_pruned_proto = tensorflow_computation_transformations.prune_tensorflow_proto(
        foo.proto)
    self.assertProtoEquals(
        serialization_utils.unpack_graph_def(
            mapped_to_identity.proto.tensorflow.graph_def),
        serialization_utils.unpack_graph_def(
            foo_pruned_proto.tensorflow.graph_def))
    self.assertEqual(mapped_to_identity.type_signature, foo.type_signature)

  def test_permute_graph_inputs_identity_permutation_leaves_names_alone(self):
    computation_arg_type = computation_types.StructType([('a', tf.int32),
                                                         ('b', tf.float32)])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    foo_pruned_proto = tensorflow_computation_transformations.prune_tensorflow_proto(
        foo.proto)
    mapped_to_identity = compiled_computation_transforms.permute_graph_inputs(
        foo, [0, 1])
    self.assertEqual(mapped_to_identity.proto.tensorflow.parameter,
                     foo.proto.tensorflow.parameter)
    self.assertEqual(mapped_to_identity.proto.tensorflow.result,
                     foo.proto.tensorflow.result)
    self.assertEqual(mapped_to_identity.proto.tensorflow.initialize_op,
                     foo.proto.tensorflow.initialize_op)
    self.assertProtoEquals(
        serialization_utils.unpack_graph_def(
            mapped_to_identity.proto.tensorflow.graph_def),
        serialization_utils.unpack_graph_def(
            foo_pruned_proto.tensorflow.graph_def))
    self.assertEqual(mapped_to_identity.type_signature, foo.type_signature)

  def test_permute_graph_inputs_flip_input_order_changes_only_parameters(self):
    computation_arg_type = computation_types.StructType([('a', tf.int32),
                                                         ('b', tf.float32),
                                                         ('c', tf.bool)])
    permuted_arg_type = computation_types.StructType([('c', tf.bool),
                                                      ('a', tf.int32),
                                                      ('b', tf.float32)])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    permuted_inputs = compiled_computation_transforms.permute_graph_inputs(
        foo, [2, 0, 1])
    self.assertEqual(permuted_inputs.type_signature.parameter,
                     permuted_arg_type)
    self.assertEqual(permuted_inputs.type_signature.result,
                     foo.type_signature.result)
    pruned_foo_proto = tensorflow_computation_transformations.prune_tensorflow_proto(
        foo.proto)
    self.assertProtoEquals(
        serialization_utils.unpack_graph_def(
            permuted_inputs.proto.tensorflow.graph_def),
        serialization_utils.unpack_graph_def(
            pruned_foo_proto.tensorflow.graph_def))
    self.assertEqual(permuted_inputs.proto.tensorflow.initialize_op,
                     foo.proto.tensorflow.initialize_op)
    self.assertEqual(permuted_inputs.proto.tensorflow.result,
                     foo.proto.tensorflow.result)

  def test_permute_graph_inputs_flip_input_order_executes_correctly(self):
    computation_arg_type = computation_types.StructType([('a', tf.int32),
                                                         ('b', tf.float32),
                                                         ('c', tf.bool)])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    flipped_inputs = compiled_computation_transforms.permute_graph_inputs(
        foo, [1, 0, 2])
    expected_result = structure.Struct([
        ('a', 0),
        ('b', 1.0),
        ('c', True),
    ])
    structure_input = structure.Struct([
        ('b', 1.0),
        ('a', 0),
        ('c', True),
    ])
    result = compiler_test_utils.run_tensorflow(flipped_inputs.proto,
                                                [1.0, 0, True])
    self.assertEqual(result, expected_result)
    result = compiler_test_utils.run_tensorflow(flipped_inputs.proto,
                                                structure_input)
    self.assertEqual(result, expected_result)
    with self.assertRaises(TypeError):
      compiler_test_utils.run_tensorflow(flipped_inputs.proto, [0, 1.0, True])
    with self.assertRaises(TypeError):
      compiler_test_utils.run_tensorflow(flipped_inputs.proto, expected_result)
class WrapParameterAsTupleTest(test_case.TestCase, parameterized.TestCase):

  def test_bind_graph_parameter_as_tuple_raises_on_none(self):
    with self.assertRaises(TypeError):
      compiled_computation_transforms.bind_graph_parameter_as_tuple(None)

  def test_bind_graph_parameter_as_tuple_raises_on_non_string_name(self):
    computation_arg_type = computation_types.StructType([tf.int32])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    with self.assertRaises(TypeError):
      compiled_computation_transforms.bind_graph_parameter_as_tuple(foo, name=1)

  def test_bind_graph_parameter_as_tuple_wraps_tuple(self):
    computation_arg_type = computation_types.StructType([tf.int32])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    wrapped_inputs = compiled_computation_transforms.bind_graph_parameter_as_tuple(
        foo)
    parameter_type = computation_types.StructType(
        [foo.type_signature.parameter])
    expected_type_signature = computation_types.FunctionType(
        parameter_type, foo.type_signature.result)
    self.assertEqual(wrapped_inputs.type_signature, expected_type_signature)
    actual_result = compiler_test_utils.run_tensorflow(wrapped_inputs.proto,
                                                       [[1]])
    expected_result = compiler_test_utils.run_tensorflow(foo.proto, [1])
    self.assertEqual(actual_result, expected_result)

  def assertSequenceEqual(self, a, b):
    """Assert two tff.SequenceType values are the same."""
    if (isinstance(a, collections.abc.Sequence) and
        isinstance(b, collections.abc.Sequence)):
      sequence = zip(a, b)
    elif isinstance(a, tf.data.Dataset) and isinstance(b, tf.data.Dataset):
      # `tf.data.Dataset.zip` takes a single (nested) structure of datasets,
      # not separate positional arguments.
      sequence = tf.data.Dataset.zip((a, b))
    else:
      self.fail('Value is not a sequence, got types a={!s}, b={!s}'.format(
          type(a), type(b)))
    for element in sequence:
      self.assertEqual(element[0], element[1])

  def test_bind_graph_parameter_as_tuple_wraps_sequence(self):
    computation_arg_type = computation_types.SequenceType(tf.int32)
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    wrapped_inputs = compiled_computation_transforms.bind_graph_parameter_as_tuple(
        foo)
    parameter_type = computation_types.StructType(
        [foo.type_signature.parameter])
    expected_type_signature = computation_types.FunctionType(
        parameter_type, foo.type_signature.result)
    self.assertEqual(wrapped_inputs.type_signature, expected_type_signature)
    actual_result = compiler_test_utils.run_tensorflow(wrapped_inputs.proto,
                                                       [[1]])
    expected_result = compiler_test_utils.run_tensorflow(foo.proto, [1])
    self.assertSequenceEqual(actual_result, expected_result)

  def test_bind_graph_parameter_as_tuple_wraps_tensor(self):
    computation_arg_type = computation_types.TensorType(tf.int32)
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    wrapped_inputs = compiled_computation_transforms.bind_graph_parameter_as_tuple(
        foo)
    parameter_type = computation_types.StructType(
        [foo.type_signature.parameter])
    expected_type_signature = computation_types.FunctionType(
        parameter_type, foo.type_signature.result)
    self.assertEqual(wrapped_inputs.type_signature, expected_type_signature)
    actual_result = compiler_test_utils.run_tensorflow(wrapped_inputs.proto,
                                                       [1])
    expected_result = compiler_test_utils.run_tensorflow(foo.proto, 1)
    self.assertEqual(actual_result, expected_result)

  def test_bind_graph_parameter_as_tuple_adds_name(self):
    computation_arg_type = computation_types.TensorType(tf.int32)
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    wrapped_inputs = compiled_computation_transforms.bind_graph_parameter_as_tuple(
        foo, name='a')
    expected_type_signature = computation_types.FunctionType(
        computation_types.StructType((
            'a',
            foo.type_signature.parameter,
        )), foo.type_signature.result)
    self.assertEqual(wrapped_inputs.type_signature, expected_type_signature)
    actual_result = compiler_test_utils.run_tensorflow(wrapped_inputs.proto,
                                                       [1])
    expected_result = compiler_test_utils.run_tensorflow(foo.proto, 1)
    self.assertEqual(actual_result, expected_result)
class WrapResultAsTupleTest(test_case.TestCase, parameterized.TestCase):

  def test_bind_graph_result_as_tuple_raises_on_none(self):
    with self.assertRaises(TypeError):
      compiled_computation_transforms.bind_graph_result_as_tuple(None)

  def test_bind_graph_result_as_tuple_raises_on_non_string_name(self):
    computation_arg_type = computation_types.StructType([tf.int32])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    with self.assertRaises(TypeError):
      compiled_computation_transforms.bind_graph_result_as_tuple(foo, name=1)

  def test_bind_graph_result_as_tuple_wraps_tuple(self):
    computation_arg_type = computation_types.StructType([tf.int32])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    wrapped_output = compiled_computation_transforms.bind_graph_result_as_tuple(
        foo)
    expected_type_signature = computation_types.FunctionType(
        foo.type_signature.parameter,
        computation_types.StructType([foo.type_signature.result]))
    self.assertEqual(wrapped_output.type_signature, expected_type_signature)
    actual_result = compiler_test_utils.run_tensorflow(wrapped_output.proto,
                                                       [1])
    expected_result = compiler_test_utils.run_tensorflow(foo.proto, [1])
    self.assertEqual(actual_result[0], expected_result)

  def test_bind_graph_result_as_tuple_wraps_sequence(self):
    computation_arg_type = computation_types.SequenceType(tf.int32)
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    wrapped_output = compiled_computation_transforms.bind_graph_result_as_tuple(
        foo)
    expected_type_signature = computation_types.FunctionType(
        foo.type_signature.parameter,
        computation_types.StructType([foo.type_signature.result]))
    self.assertEqual(wrapped_output.type_signature, expected_type_signature)
    actual_result = compiler_test_utils.run_tensorflow(wrapped_output.proto,
                                                       [1])
    expected_result = compiler_test_utils.run_tensorflow(foo.proto, [1])
    self.assertSequenceEqual(actual_result[0], expected_result)

  def test_bind_graph_result_as_tuple_wraps_tensor(self):
    computation_arg_type = computation_types.TensorType(tf.int32)
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    wrapped_output = compiled_computation_transforms.bind_graph_result_as_tuple(
        foo)
    expected_type_signature = computation_types.FunctionType(
        foo.type_signature.parameter,
        computation_types.StructType([foo.type_signature.result]))
    self.assertEqual(wrapped_output.type_signature, expected_type_signature)
    actual_result = compiler_test_utils.run_tensorflow(wrapped_output.proto,
                                                       [1])
    expected_result = compiler_test_utils.run_tensorflow(foo.proto, [1])
    self.assertEqual(actual_result[0], expected_result)

  def test_bind_graph_result_as_tuple_adds_name(self):
    computation_arg_type = computation_types.TensorType(tf.int32)
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    wrapped_output = compiled_computation_transforms.bind_graph_result_as_tuple(
        foo, name='a')
    expected_type_signature = computation_types.FunctionType(
        foo.type_signature.parameter,
        computation_types.StructType((
            'a',
            foo.type_signature.result,
        )))
    self.assertEqual(wrapped_output.type_signature, expected_type_signature)
    actual_result = compiler_test_utils.run_tensorflow(wrapped_output.proto, 1)
    expected_result = compiler_test_utils.run_tensorflow(foo.proto, 1)
    self.assertEqual(actual_result[0], expected_result)
class GraphInputPaddingTest(test_case.TestCase, parameterized.TestCase):

  def test_pad_graph_inputs_to_match_type_raises_on_none(self):
    with self.assertRaisesRegex(TypeError, r'Expected.*CompiledComputation'):
      compiled_computation_transforms.pad_graph_inputs_to_match_type(
          None, computation_types.StructType([tf.int32]))

  def test_pad_graph_inputs_to_match_type_raises_on_wrong_requested_type(self):
    comp = building_block_factory.create_compiled_identity(
        computation_types.StructType([tf.int32]))
    tensor_type = computation_types.TensorType(tf.int32)
    with self.assertRaisesRegex(TypeError, r'Expected.*StructType'):
      compiled_computation_transforms.pad_graph_inputs_to_match_type(
          comp, tensor_type)

  def test_pad_graph_inputs_to_match_type_raises_on_wrong_graph_parameter_type(
      self):
    comp = building_block_factory.create_compiled_identity(
        computation_types.TensorType(tf.int32))
    with self.assertRaisesRegex(
        TypeError,
        r'Can only pad inputs of a CompiledComputation with parameter type struct'
    ):
      compiled_computation_transforms.pad_graph_inputs_to_match_type(
          comp, computation_types.StructType([tf.int32]))

  def test_pad_graph_inputs_to_match_type_raises_on_requested_type_too_short(
      self):
    comp = building_block_factory.create_compiled_identity(
        computation_types.StructType([tf.int32] * 3))
    with self.assertRaisesRegex(ValueError, r'must have more elements'):
      compiled_computation_transforms.pad_graph_inputs_to_match_type(
          comp, computation_types.StructType([tf.int32] * 2))

  def test_pad_graph_inputs_to_match_type_raises_on_mismatched_graph_type_and_requested_type(
      self):
    comp = building_block_factory.create_compiled_identity(
        computation_types.StructType([tf.float32]))
    with self.assertRaisesRegex(TypeError, r'must match the beginning'):
      compiled_computation_transforms.pad_graph_inputs_to_match_type(
          comp, computation_types.StructType([tf.int32] * 2))

  def test_pad_graph_inputs_to_match_type_preserves_named_type_signature(self):
    computation_arg_type = computation_types.StructType([('a', tf.int32)])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    padded_inputs = compiled_computation_transforms.pad_graph_inputs_to_match_type(
        foo, computation_types.StructType([('a', tf.int32), ('b', tf.float32)]))
    expected_type_signature = computation_types.FunctionType(
        [('a', tf.int32), ('b', tf.float32)], [('a', tf.int32)])
    # TODO(b/157172423): change to assertEqual when Py container is preserved.
    padded_inputs.type_signature.check_equivalent_to(expected_type_signature)

  def test_pad_graph_inputs_to_match_type_adds_names_to_unnamed_tuple(self):
    computation_arg_type = computation_types.StructType([tf.int32])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    padded_inputs = compiled_computation_transforms.pad_graph_inputs_to_match_type(
        foo, computation_types.StructType([('a', tf.int32), ('b', tf.float32)]))
    expected_type_signature = computation_types.FunctionType(
        [('a', tf.int32), ('b', tf.float32)], [tf.int32])
    # TODO(b/157172423): change to assertEqual when Py container is preserved.
    padded_inputs.type_signature.check_equivalent_to(expected_type_signature)

  def test_pad_graph_inputs_to_match_type_preserves_unnamed_type_signature(
      self):
    computation_arg_type = computation_types.StructType([tf.int32])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    padded_inputs = compiled_computation_transforms.pad_graph_inputs_to_match_type(
        foo, computation_types.StructType([tf.int32, tf.float32]))
    expected_type_signature = computation_types.FunctionType(
        [tf.int32, tf.float32], [tf.int32])
    # TODO(b/157172423): change to assertEqual when Py container is preserved.
    padded_inputs.type_signature.check_equivalent_to(expected_type_signature)

  def test_pad_graph_inputs_to_match_type_add_single_int_executes_correctly(
      self):
    computation_arg_type = computation_types.StructType([tf.int32])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    padded_inputs = compiled_computation_transforms.pad_graph_inputs_to_match_type(
        foo, computation_types.StructType([tf.int32, tf.float32]))
    expected_result = structure.Struct([(None, 1)])
    actual_result = compiler_test_utils.run_tensorflow(padded_inputs.proto,
                                                       [1, 0.0])
    self.assertEqual(actual_result, expected_result)
    actual_result = compiler_test_utils.run_tensorflow(padded_inputs.proto,
                                                       [1, 10.0])
    self.assertEqual(actual_result, expected_result)

  def test_pad_graph_inputs_to_match_type_adds_names_to_unnamed_tuple_and_executes(
      self):
    computation_arg_type = computation_types.StructType([tf.int32])
    foo = building_block_factory.create_compiled_identity(computation_arg_type)
    padded_inputs = compiled_computation_transforms.pad_graph_inputs_to_match_type(
        foo, computation_types.StructType([('a', tf.int32), ('b', tf.float32)]))
    expected_result = structure.Struct([(None, 1)])
    actual_result = compiler_test_utils.run_tensorflow(padded_inputs.proto, {
        'a': 1,
        'b': 0.0,
    })
    self.assertEqual(actual_result, expected_result)
    actual_result = compiler_test_utils.run_tensorflow(padded_inputs.proto, {
        'a': 1,
        'b': 10.0,
    })
    self.assertEqual(actual_result, expected_result)
class ConcatenateTFBlocksTest(test_case.TestCase, parameterized.TestCase):

  def test_concatenate_tensorflow_blocks_raises_on_none(self):
    with self.assertRaises(TypeError):
      compiled_computation_transforms.concatenate_tensorflow_blocks(
          None, [None])

  def test_concatenate_tensorflow_blocks_raises_no_iterable(self):
    foo_type = computation_types.TensorType(tf.float32)
    foo = building_block_factory.create_tensorflow_constant(foo_type, 0.0)
    with self.assertRaises(TypeError):
      compiled_computation_transforms.concatenate_tensorflow_blocks(foo, [None])

  def test_concatenate_tensorflow_blocks_raises_bad_comp_in_list(self):
    foo_type = computation_types.TensorType(tf.float32)
    foo = building_block_factory.create_tensorflow_constant(foo_type, 0.0)
    bad_comp = building_blocks.Data('x', tf.int32)
    with self.assertRaises(TypeError):
      compiled_computation_transforms.concatenate_tensorflow_blocks(
          [foo, bad_comp], [None, None])

  def test_concatenate_tensorflow_blocks_fails_empty_list(self):
    with self.assertRaises(ValueError):
      compiled_computation_transforms.concatenate_tensorflow_blocks([], [None])

  def test_concatenate_tensorflow_blocks_raises_bad_names_list_length(self):
    foo_type = computation_types.TensorType(tf.float32)
    foo = building_block_factory.create_tensorflow_constant(foo_type, 0.0)
    bar_type = computation_types.TensorType(tf.float32)
    bar = building_block_factory.create_tensorflow_constant(bar_type, 1.0)
    with self.assertRaises(ValueError):
      compiled_computation_transforms.concatenate_tensorflow_blocks([foo, bar],
                                                                    [None])

  def test_concatenate_tensorflow_blocks_raises_bad_names_list_type(self):
    foo_type = computation_types.TensorType(tf.float32)
    foo = building_block_factory.create_tensorflow_constant(foo_type, 0.0)
    bar_type = computation_types.TensorType(tf.float32)
    bar = building_block_factory.create_tensorflow_constant(bar_type, 1.0)
    with self.assertRaises(TypeError):
      compiled_computation_transforms.concatenate_tensorflow_blocks([foo, bar],
                                                                    'x')

  def test_concatenate_tensorflow_blocks_raises_bad_names_list_element_type(
      self):
    foo_type = computation_types.TensorType(tf.float32)
    foo = building_block_factory.create_tensorflow_constant(foo_type, 0.0)
    bar_type = computation_types.TensorType(tf.float32)
    bar = building_block_factory.create_tensorflow_constant(bar_type, 1.0)
    with self.assertRaises(TypeError):
      compiled_computation_transforms.concatenate_tensorflow_blocks([foo, bar],
                                                                    ['x', 1])

  def test_concatenate_tensorflow_blocks_no_arg(self):
    foo_type = computation_types.TensorType(tf.float32)
    foo = building_block_factory.create_tensorflow_constant(foo_type, 0.0)
    bar_type = computation_types.TensorType(tf.float32)
    bar = building_block_factory.create_tensorflow_constant(bar_type, 1.0)
    merged_comp = compiled_computation_transforms.concatenate_tensorflow_blocks(
        [foo.function, bar.function], [None, None])
    self.assertIsInstance(merged_comp, building_blocks.CompiledComputation)
    concatenated_type = computation_types.FunctionType(
        None, [tf.float32, tf.float32])
    # TODO(b/157172423): change to assertEqual when Py container is preserved.
    merged_comp.type_signature.check_equivalent_to(concatenated_type)
    actual_result = compiler_test_utils.run_tensorflow(merged_comp.proto, None)
    expected_result = structure.Struct([(None, 0.0), (None, 1.0)])
    self.assertAlmostEqual(actual_result, expected_result)

  def test_concatenate_tensorflow_blocks_named_outputs_type_preserved(self):
    foo_type = computation_types.TensorType(tf.float32)
    foo = building_block_factory.create_tensorflow_constant(foo_type, 0.0)
    bar_type = computation_types.TensorType(tf.float32)
    bar = building_block_factory.create_tensorflow_constant(bar_type, 1.0)
    merged_comp = compiled_computation_transforms.concatenate_tensorflow_blocks(
        [foo.function, bar.function], ['a', 'b'])
    self.assertIsInstance(merged_comp, building_blocks.CompiledComputation)
    concatenated_type = computation_types.FunctionType(
        None, [('a', tf.float32), ('b', tf.float32)])
    # TODO(b/157172423): change to assertEqual when Py container is preserved.
    merged_comp.type_signature.check_equivalent_to(concatenated_type)

  def test_concatenate_tensorflow_blocks_mix_of_arg_and_no_arg(self):
    foo_type = computation_types.TensorType(tf.float32)
    foo = building_block_factory.create_tensorflow_constant(foo_type, 0.0)
    bar = _create_compiled_computation(lambda x: x + tf.constant(1.0),
                                       computation_types.TensorType(tf.float32))
    merged_comp = compiled_computation_transforms.concatenate_tensorflow_blocks(
        [foo.function, bar], [None, None])
    self.assertIsInstance(merged_comp, building_blocks.CompiledComputation)
    concatenated_type = computation_types.FunctionType(
        tf.float32, [tf.float32, tf.float32])
    # TODO(b/157172423): change to assertEqual when Py container is preserved.
    merged_comp.type_signature.check_equivalent_to(concatenated_type)
    actual_result = compiler_test_utils.run_tensorflow(merged_comp.proto, 0.0)
    expected_result = structure.Struct([(None, 0.0), (None, 1.0)])
    self.assertAlmostEqual(actual_result, expected_result)

  def test_concatenate_tensorflow_blocks_tensor_args(self):
    foo = _create_compiled_computation(lambda x: x + tf.constant(0.0),
                                       computation_types.TensorType(tf.float32))
    bar = _create_compiled_computation(lambda x: x + tf.constant(1.0),
                                       computation_types.TensorType(tf.float32))
    merged_comp = compiled_computation_transforms.concatenate_tensorflow_blocks(
        [foo, bar], [None, None])
    self.assertIsInstance(merged_comp, building_blocks.CompiledComputation)
    concatenated_type = computation_types.FunctionType(
        [tf.float32, tf.float32], [tf.float32, tf.float32])
    # TODO(b/157172423): change to assertEqual when Py container is preserved.
    merged_comp.type_signature.check_equivalent_to(concatenated_type)
    actual_result = compiler_test_utils.run_tensorflow(merged_comp.proto,
                                                       [1.0, 0.0])
    expected_result = structure.Struct([(None, 1.0), (None, 1.0)])
    self.assertAlmostEqual(actual_result, expected_result)
    actual_result = compiler_test_utils.run_tensorflow(merged_comp.proto,
                                                       [2.0, 2.0])
    expected_result = structure.Struct([(None, 2.0), (None, 3.0)])
    self.assertAlmostEqual(actual_result, expected_result)

  def test_concatenate_tensorflow_blocks_unnamed_tuple_args(self):
    foo = _create_compiled_computation(
        lambda x: [x[0] + tf.constant(0.0), x[1] + tf.constant(1.0)],
        computation_types.StructType([tf.float32, tf.float32]))
    bar = _create_compiled_computation(
        lambda x: [x[0] + tf.constant(1.0), x[1] + tf.constant(1.0)],
        computation_types.StructType([tf.float32, tf.float32]))
    merged_comp = compiled_computation_transforms.concatenate_tensorflow_blocks(
        [foo, bar], [None, None])
    self.assertIsInstance(merged_comp, building_blocks.CompiledComputation)
    concatenated_type = computation_types.FunctionType(
        [[tf.float32, tf.float32], [tf.float32, tf.float32]],
        [[tf.float32, tf.float32], [tf.float32, tf.float32]])
    # TODO(b/157172423): change to assertEqual when Py container is preserved.
    merged_comp.type_signature.check_equivalent_to(concatenated_type)
    actual_result = compiler_test_utils.run_tensorflow(merged_comp.proto,
                                                       [[1.0, 0.0], [0.0, 1.0]])
    expected_result = structure.Struct([(None, 1.0), (None, 1.0)])
    self.assertEqual(actual_result[0], expected_result)
    actual_result = compiler_test_utils.run_tensorflow(merged_comp.proto,
                                                       [[1.0, 0.0], [0.0, 1.0]])
    expected_result = structure.Struct([(None, 1.0), (None, 2.0)])
    self.assertEqual(actual_result[1], expected_result)

  def test_concatenate_tensorflow_blocks_named_tuple_args(self):
    foo_type = computation_types.StructType([('a', tf.float32),
                                             ('b', tf.float32)])
    foo = building_block_factory.create_compiled_identity(foo_type)
    bar_type = computation_types.StructType([('c', tf.float32),
                                             ('d', tf.float32)])
    bar = building_block_factory.create_compiled_identity(bar_type)
    merged_comp = compiled_computation_transforms.concatenate_tensorflow_blocks(
        [foo, bar], [None, None])
    self.assertIsInstance(merged_comp, building_blocks.CompiledComputation)
    concatenated_type = computation_types.FunctionType(
        [[('a', tf.float32),
          ('b', tf.float32)], [('c', tf.float32), ('d', tf.float32)]],
        [[('a', tf.float32),
          ('b', tf.float32)], [('c', tf.float32), ('d', tf.float32)]])
    self.assertEqual(str(merged_comp.type_signature), str(concatenated_type))
    actual_result = compiler_test_utils.run_tensorflow(merged_comp.proto,
                                                       [[1.0, 0.0], [0.0, 1.0]])
    expected_result = structure.Struct([('a', 1.), ('b', 0.)])
    self.assertEqual(actual_result[0], expected_result)
    expected_result = structure.Struct([('c', 0.), ('d', 1.)])
    self.assertEqual(actual_result[1], expected_result)

  def test_concatenate_tensorflow_blocks_sequence_parameters_and_results(self):
    foo = _create_compiled_computation(
        lambda ds: ds.reduce(tf.constant(0, tf.int64), lambda x, y: x + y),
        computation_types.SequenceType(tf.int64))
    bar = _create_compiled_computation(lambda: tf.data.Dataset.range(5), None)
    merged_reduce_comps = compiled_computation_transforms.concatenate_tensorflow_blocks(
        [foo, foo], [None, None])
    merged_input_comps = compiled_computation_transforms.concatenate_tensorflow_blocks(
        [bar, bar], [None, None])
    concat_input_type_signature = computation_types.FunctionType(
        None, [
            computation_types.SequenceType(tf.int64),
            computation_types.SequenceType(tf.int64),
        ])
    concat_reduce_type_signature = computation_types.FunctionType(
        concat_input_type_signature.result, [tf.int64, tf.int64])
    # TODO(b/157172423): change to assertEqual when Py container is preserved.
    concat_input_type_signature.check_equivalent_to(
        merged_input_comps.type_signature)
    concat_reduce_type_signature.check_equivalent_to(
        merged_reduce_comps.type_signature)
    input_result = compiler_test_utils.run_tensorflow(merged_input_comps.proto)
    actual_result = compiler_test_utils.run_tensorflow(
        merged_reduce_comps.proto, input_result)
    self.assertEqual(actual_result[0], 10)
    self.assertEqual(actual_result[1], 10)
def _create_simple_selection_from_called_graph():
  noarg_tuple = _create_compiled_computation(
      lambda: [tf.constant(0.), tf.constant(1.)], None)
  called_noarg_tuple = building_blocks.Call(noarg_tuple, None)
  selected_result = building_blocks.Selection(called_noarg_tuple, index=0)
  return selected_result
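# The returned pattern is Selection(Call(CompiledComputation, None), index=0),
# which is exactly the shape SelectionFromCalledTensorFlowBlock matches in the
# tests below.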
class SelectionFromCalledTensorFlowBlockTest(test_case.TestCase,
                                             parameterized.TestCase):

  def test_should_transform_identifies_correct_pattern(self):
    pattern = _create_simple_selection_from_called_graph()
    logic = compiled_computation_transforms.SelectionFromCalledTensorFlowBlock()
    self.assertTrue(logic.should_transform(pattern))

  def test_output_selection_should_not_transform_unselected_call(self):
    noarg_tuple = _create_compiled_computation(
        lambda: [tf.constant(0.), tf.constant(1.)], None)
    called_noarg_tuple = building_blocks.Call(noarg_tuple, None)
    output_selector = compiled_computation_transforms.SelectionFromCalledTensorFlowBlock(
    )
    self.assertFalse(output_selector.should_transform(called_noarg_tuple))

  def test_transform_constructs_correct_root_node(self):
    pattern = _create_simple_selection_from_called_graph()
    logic = compiled_computation_transforms.SelectionFromCalledTensorFlowBlock()
    parsed_selection, mutated = logic.transform(pattern)
    self.assertIsInstance(parsed_selection, building_blocks.Call)
    self.assertTrue(mutated)

  def test_leaves_type_signature_alone(self):
    pattern = _create_simple_selection_from_called_graph()
    logic = compiled_computation_transforms.SelectionFromCalledTensorFlowBlock()
    parsed, mutated = logic.transform(pattern)
    self.assertEqual(parsed.type_signature, pattern.type_signature)
    self.assertTrue(mutated)

  def test_output_selection_executes_zeroth_element(self):
    noarg_tuple = _create_compiled_computation(
        lambda: [tf.constant(0.0), tf.constant(1.0)], None)
    called_noarg_tuple = building_blocks.Call(noarg_tuple, None)
    selected_zero = building_blocks.Selection(called_noarg_tuple, index=0)
    output_selector = compiled_computation_transforms.SelectionFromCalledTensorFlowBlock(
    )
    parsed_zero, mutated = output_selector.transform(selected_zero)
    result = compiler_test_utils.run_tensorflow(parsed_zero.function.proto)
    self.assertEqual(result, 0.0)
    self.assertTrue(mutated)

  def test_output_selection_executes_first_element(self):
    noarg_tuple = _create_compiled_computation(
        lambda: [tf.constant(0.0), tf.constant(1.0)], None)
    called_noarg_tuple = building_blocks.Call(noarg_tuple, None)
    selected_one = building_blocks.Selection(called_noarg_tuple, index=1)
    output_selector = compiled_computation_transforms.SelectionFromCalledTensorFlowBlock(
    )
    parsed_one, mutated = output_selector.transform(selected_one)
    result = compiler_test_utils.run_tensorflow(parsed_one.function.proto)
    self.assertEqual(result, 1.0)
    self.assertTrue(mutated)

  def test_output_selection_executes_when_selecting_by_name(self):
    fn = lambda: {'a': tf.constant(0.0), 'b': tf.constant(1.0)}
    noarg_tuple = _create_compiled_computation(fn, None)
    called_noarg_tuple = building_blocks.Call(noarg_tuple, None)
    selected_a = building_blocks.Selection(called_noarg_tuple, name='a')
    output_selector = compiled_computation_transforms.SelectionFromCalledTensorFlowBlock(
    )
    parsed_a, mutated = output_selector.transform(selected_a)
    result = compiler_test_utils.run_tensorflow(parsed_a.function.proto)
    self.assertEqual(result, 0.0)
    self.assertTrue(mutated)
def _create_simple_lambda_wrapping_graph():
  tensor_type = computation_types.TensorType(tf.int32)
  integer_identity = building_block_factory.create_compiled_identity(
      tensor_type)
  x_ref = building_blocks.Reference('x', tf.int32)
  called_integer_identity = building_blocks.Call(integer_identity, x_ref)
  lambda_wrap = building_blocks.Lambda('x', tf.int32, called_integer_identity)
  return lambda_wrap
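# In compact form this is `(x -> id(x))`: a Lambda whose body just calls a
# compiled identity graph on its own parameter, so the Lambda wrapper is
# redundant.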
def _create_simple_lambda_calling_graph_with_arg_thrown_on_floor():
  tensor_type = computation_types.TensorType(tf.int32)
  integer_identity = building_block_factory.create_compiled_identity(
      tensor_type)
  x_data = building_blocks.Data('x', tf.int32)
  called_integer_identity = building_blocks.Call(integer_identity, x_data)
  lambda_wrap = building_blocks.Lambda('y', tf.int32, called_integer_identity)
  return lambda_wrap
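# Here the Lambda's parameter 'y' is unused ("thrown on the floor"): the body
# calls the graph on the unrelated Data node 'x'. The tests below check that
# LambdaWrappingGraph still recognizes and unwraps this shape.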
class LambdaWrappingGraphTest(test_case.TestCase, parameterized.TestCase):

  def test_should_transform_identifies_correct_pattern(self):
    pattern = _create_simple_lambda_wrapping_graph()
    logic = compiled_computation_transforms.LambdaWrappingGraph()
    self.assertTrue(logic.should_transform(pattern))

  def test_should_not_transform_compiled_computation(self):
    integer_square = _create_compiled_computation(
        lambda x: x * x, computation_types.TensorType(tf.int32))
    logic = compiled_computation_transforms.LambdaWrappingGraph()
    self.assertFalse(logic.should_transform(integer_square))

  def test_transform_constructs_correct_root_node(self):
    pattern = _create_simple_lambda_wrapping_graph()
    logic = compiled_computation_transforms.LambdaWrappingGraph()
    parsed_selection, mutated = logic.transform(pattern)
    self.assertIsInstance(parsed_selection, building_blocks.CompiledComputation)
    self.assertTrue(mutated)

  def test_leaves_type_signature_alone(self):
    pattern = _create_simple_lambda_wrapping_graph()
    logic = compiled_computation_transforms.LambdaWrappingGraph()
    parsed, mutated = logic.transform(pattern)
    self.assertEqual(parsed.type_signature, pattern.type_signature)
    self.assertTrue(mutated)

  def test_should_transform_arg_thrown_on_floor(self):
    lambda_throwing_arg_on_floor = _create_simple_lambda_calling_graph_with_arg_thrown_on_floor(
    )
    logic = compiled_computation_transforms.LambdaWrappingGraph()
    self.assertTrue(logic.should_transform(lambda_throwing_arg_on_floor))

  def test_transform_with_arg_thrown_on_floor_constructs_correct_root_node(
      self):
    pattern = _create_simple_lambda_calling_graph_with_arg_thrown_on_floor()
    logic = compiled_computation_transforms.LambdaWrappingGraph()
    parsed_selection, mutated = logic.transform(pattern)
    self.assertIsInstance(parsed_selection, building_blocks.CompiledComputation)
    self.assertTrue(mutated)

  def test_leaves_type_signature_alone_arg_thrown_on_floor(self):
    pattern = _create_simple_lambda_calling_graph_with_arg_thrown_on_floor()
    logic = compiled_computation_transforms.LambdaWrappingGraph()
    parsed, mutated = logic.transform(pattern)
    self.assertEqual(parsed.type_signature, pattern.type_signature)
    self.assertTrue(mutated)

  def test_unwraps_identity(self):
    integer_identity = _create_simple_lambda_wrapping_graph()
    lambda_unwrapper = compiled_computation_transforms.LambdaWrappingGraph()
    unwrapped_function, mutated = lambda_unwrapper.transform(integer_identity)
    for k in range(5):
      result = compiler_test_utils.run_tensorflow(unwrapped_function.proto, k)
      self.assertEqual(result, k)
    self.assertTrue(mutated)

  def test_unwraps_square(self):
    integer_square = _create_compiled_computation(
        lambda x: x * x, computation_types.TensorType(tf.int32))
    x_ref = building_blocks.Reference('x', tf.int32)
    called_integer_square = building_blocks.Call(integer_square, x_ref)
    lambda_wrap = building_blocks.Lambda('x', tf.int32, called_integer_square)
    lambda_unwrapper = compiled_computation_transforms.LambdaWrappingGraph()
    unwrapped_function, mutated = lambda_unwrapper.transform(lambda_wrap)
    for k in range(5):
      result = compiler_test_utils.run_tensorflow(unwrapped_function.proto, k)
      self.assertEqual(result, k * k)
    self.assertTrue(mutated)
def _create_simple_tuple_of_called_graphs():
  tensor_type = computation_types.TensorType(tf.float32)
  called_const = building_block_factory.create_tensorflow_constant(
      tensor_type, 1.0)
  tuple_of_called_graphs = building_blocks.Struct([called_const, called_const])
  return tuple_of_called_graphs
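# The result is Struct([Call(const), Call(const)]): a tuple whose elements are
# all called TensorFlow graphs, which is the pattern StructCalledGraphs
# targets below.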
class StructCalledGraphsTest(test_case.TestCase, parameterized.TestCase):

  def test_empty_tuple(self):
    pattern = building_blocks.Struct([])
    logic = compiled_computation_transforms.StructCalledGraphs()
    transformed, _ = logic.transform(pattern)
    self.assertEqual(transformed.type_signature, pattern.type_signature)
    self.assertIsInstance(transformed, building_blocks.Call)
    self.assertIsInstance(transformed.function,
                          building_blocks.CompiledComputation)
    self.assertIsNone(transformed.argument)

  def test_should_transform_identifies_correct_pattern(self):
    pattern = _create_simple_tuple_of_called_graphs()
    logic = compiled_computation_transforms.StructCalledGraphs()
    self.assertTrue(logic.should_transform(pattern))

  def test_should_not_transform_compiled_computation(self):
    integer_square = _create_compiled_computation(
        lambda x: x * x, computation_types.TensorType(tf.int32))
    tuple_parser = compiled_computation_transforms.StructCalledGraphs()
    self.assertFalse(tuple_parser.should_transform(integer_square))

  def test_transform_constructs_correct_root_node(self):
    pattern = _create_simple_tuple_of_called_graphs()
    logic = compiled_computation_transforms.StructCalledGraphs()
    parsed_selection, mutated = logic.transform(pattern)
    self.assertIsInstance(parsed_selection, building_blocks.Call)
    self.assertTrue(mutated)

  def test_leaves_type_signature_alone(self):
    pattern = _create_simple_tuple_of_called_graphs()
    logic = compiled_computation_transforms.StructCalledGraphs()
    parsed, mutated = logic.transform(pattern)
    self.assertEqual(parsed.type_signature, pattern.type_signature)
    self.assertTrue(mutated)

  def test_named_tuple_of_graphs_preserves_type(self):
    called_noarg_const_0_type = computation_types.TensorType(tf.float32)
    called_noarg_const_0 = building_block_factory.create_tensorflow_constant(
        called_noarg_const_0_type, 0.0)
    called_noarg_const_1_type = computation_types.TensorType(tf.int32)
    called_noarg_const_1 = building_block_factory.create_tensorflow_constant(
        called_noarg_const_1_type, 1)
    tuple_of_called_graphs = building_blocks.Struct([
        ('a', called_noarg_const_0), ('b', called_noarg_const_1)
    ])
    tuple_parser = compiled_computation_transforms.StructCalledGraphs()
    parsed_tuple, mutated = tuple_parser.transform(tuple_of_called_graphs)
    self.assertEqual(parsed_tuple.type_signature,
                     tuple_of_called_graphs.type_signature)
    self.assertTrue(mutated)

  def test_no_arg_functions_execute(self):
    called_noarg_const_0_type = computation_types.TensorType(tf.float32)
    called_noarg_const_0 = building_block_factory.create_tensorflow_constant(
        called_noarg_const_0_type, 0.0)
    called_noarg_const_1_type = computation_types.TensorType(tf.int32)
    called_noarg_const_1 = building_block_factory.create_tensorflow_constant(
        called_noarg_const_1_type, 1)
    tuple_of_called_graphs = building_blocks.Struct(
        [called_noarg_const_0, called_noarg_const_1])
    tuple_parser = compiled_computation_transforms.StructCalledGraphs()
    parsed_tuple, mutated = tuple_parser.transform(tuple_of_called_graphs)
    self.assertEqual(parsed_tuple.type_signature,
                     tuple_of_called_graphs.type_signature)
    result = compiler_test_utils.run_tensorflow(parsed_tuple.function.proto, 10)
    self.assertEqual(result[0], 0.0)
    result = compiler_test_utils.run_tensorflow(parsed_tuple.function.proto, 0)
    self.assertEqual(result[1], 1)
    self.assertTrue(mutated)

  def test_single_function_which_takes_a_parameter_executes(self):
    called_noarg_const_0_type = computation_types.TensorType(tf.float32)
    called_noarg_const_0 = building_block_factory.create_tensorflow_constant(
        called_noarg_const_0_type, 0.0)
    integer_square = _create_compiled_computation(
        lambda x: x**2, computation_types.TensorType(tf.int32))
    square_arg = building_blocks.Reference('x', tf.int32)
    called_square = building_blocks.Call(integer_square, square_arg)
    tuple_of_called_graphs = building_blocks.Struct(
        [called_noarg_const_0, called_square])
    tuple_parser = compiled_computation_transforms.StructCalledGraphs()
    parsed_tuple, mutated = tuple_parser.transform(tuple_of_called_graphs)
    self.assertEqual(parsed_tuple.type_signature,
                     tuple_of_called_graphs.type_signature)
    for k in range(5):
      result = compiler_test_utils.run_tensorflow(parsed_tuple.function.proto,
                                                  k)
      self.assertEqual(result[0], 0.0)
      self.assertEqual(result[1], k**2)
    self.assertTrue(mutated)

  def test_two_functions_which_takes_tensor_parameters_executes(self):
    float_cube = _create_compiled_computation(
        lambda x: x**3, computation_types.TensorType(tf.float32))
    integer_square = _create_compiled_computation(
        lambda x: x**2, computation_types.TensorType(tf.int32))
    cube_arg = building_blocks.Reference('y', tf.float32)
    called_cube = building_blocks.Call(float_cube, cube_arg)
    square_arg = building_blocks.Reference('x', tf.int32)
    called_square = building_blocks.Call(integer_square, square_arg)
    tuple_of_called_graphs = building_blocks.Struct(
        [called_cube, called_square])
    tuple_parser = compiled_computation_transforms.StructCalledGraphs()
    parsed_tuple, mutated = tuple_parser.transform(tuple_of_called_graphs)
    self.assertEqual(parsed_tuple.type_signature,
                     tuple_of_called_graphs.type_signature)
    self.assertRegexMatch(parsed_tuple.compact_representation(),
                          [r'comp#[a-zA-Z0-9]*\(<y,x>\)'])
    for k in range(5):
      result = compiler_test_utils.run_tensorflow(parsed_tuple.function.proto,
                                                  [k * 1.0, k])
      self.assertEqual(result[0], (k * 1.0)**3)
      self.assertEqual(result[1], k**2)
    self.assertTrue(mutated)

  def test_tensor_plus_tuple_parameter_executes(self):
    select_from_tuple = _create_compiled_computation(
        lambda x: x[0], computation_types.StructType([tf.float32, tf.float32]))
    integer_square = _create_compiled_computation(
        lambda x: x**2, computation_types.TensorType(tf.int32))
    selection_arg = building_blocks.Reference(
        'y', computation_types.StructType([tf.float32, tf.float32]))
    called_selection = building_blocks.Call(select_from_tuple, selection_arg)
    square_arg = building_blocks.Reference('x', tf.int32)
    called_square = building_blocks.Call(integer_square, square_arg)
    tuple_of_called_graphs = building_blocks.Struct(
        [called_selection, called_square])
    tuple_parser = compiled_computation_transforms.StructCalledGraphs()
    parsed_tuple, mutated = tuple_parser.transform(tuple_of_called_graphs)
    self.assertEqual(parsed_tuple.type_signature,
                     tuple_of_called_graphs.type_signature)
    self.assertRegexMatch(parsed_tuple.compact_representation(),
                          [r'comp#[a-zA-Z0-9]*\(<y,x>\)'])
    for k in range(5):
      result = compiler_test_utils.run_tensorflow(parsed_tuple.function.proto,
                                                  [[k * 1.0, k * 2.0], k])
      self.assertEqual(result[0], k * 1.0)
      self.assertEqual(result[1], k**2)
    self.assertTrue(mutated)

  def test_tensor_plus_named_tuple_parameter_executes(self):
    select_from_tuple = _create_compiled_computation(
        lambda x: x.a,
        computation_types.StructType([('a', tf.float32), ('b', tf.float32)]))
    integer_square = _create_compiled_computation(
        lambda x: x**2, computation_types.TensorType(tf.int32))
    selection_arg = building_blocks.Reference('y', [('a', tf.float32),
                                                    ('b', tf.float32)])
    called_selection = building_blocks.Call(select_from_tuple, selection_arg)
    square_arg = building_blocks.Reference('x', tf.int32)
    called_square = building_blocks.Call(integer_square, square_arg)
    tuple_of_called_graphs = building_blocks.Struct(
        [called_selection, called_square])
    tuple_parser = compiled_computation_transforms.StructCalledGraphs()
    parsed_tuple, mutated = tuple_parser.transform(tuple_of_called_graphs)
    self.assertEqual(parsed_tuple.type_signature,
                     tuple_of_called_graphs.type_signature)
    self.assertRegexMatch(parsed_tuple.compact_representation(),
                          [r'comp#[a-zA-Z0-9]*\(<y,x>\)'])
    for k in range(5):
      result = compiler_test_utils.run_tensorflow(parsed_tuple.function.proto,
                                                  [[k * 1.0, k * 2.0], k])
      self.assertEqual(result[0], k * 1.0)
      self.assertEqual(result[1], k**2)
    self.assertTrue(mutated)

  def test_transform_results_in_fewer_ops_with_identical_args(self):
    called_const_type = computation_types.TensorType(tf.float32)
    called_const = building_block_factory.create_tensorflow_constant(
        called_const_type, 1.0)
    id_applied_const_type = computation_types.TensorType(tf.float32)
    id_applied_const = building_blocks.Call(
        building_block_factory.create_compiled_identity(id_applied_const_type),
        called_const)
    tuple_with_identical_args = building_blocks.Struct(
        [id_applied_const, id_applied_const, id_applied_const])
    called_identities = []
    for dtype in [tf.float32, tf.int32, tf.int64]:
      called_dtype = computation_types.TensorType(dtype)
      called_scalar = building_block_factory.create_tensorflow_constant(
          called_dtype, 1)
      id_applied_scalar = building_blocks.Call(
          building_block_factory.create_compiled_identity(called_dtype),
          called_scalar)
      called_identities.append(id_applied_scalar)
    tuple_with_distinct_args = building_blocks.Struct(called_identities)
    tuple_parser = compiled_computation_transforms.StructCalledGraphs()
    identical_tuple_parsed, _ = tuple_parser.transform(
        tuple_with_identical_args)
    distinct_tuple_parsed, _ = tuple_parser.transform(tuple_with_distinct_args)
    ops_under_identical_tuple = tree_analysis.count_tensorflow_ops_under(
        identical_tuple_parsed)
    ops_under_distinct_tuple = tree_analysis.count_tensorflow_ops_under(
        distinct_tuple_parsed)
    self.assertLess(ops_under_identical_tuple, ops_under_distinct_tuple)
def _simulate_permutation_behavior(tuple_type, permutation):
  type_elements = structure.to_elements(tuple_type)
  constructed_type_elements = []
  for k in permutation:
    constructed_type_elements.append(type_elements[k])
  return computation_types.StructType(constructed_type_elements)
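# For example (illustrative): applying permutation [1, 0] to
# StructType([tf.float32, tf.int32]) yields StructType([tf.int32, tf.float32]).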
def _construct_permutation_tuple(n, m, offset):
  assert offset + m < n
  tuple_type_elements = [(str(k),
                          computation_types.AbstractType('T{}'.format(k)))
                         for k in range(n)]
  initial_type = computation_types.StructType(tuple_type_elements)
  selected_indices = [j + offset for j in range(m)]
  return ('tuple_type_{}_select_{}_indices_offset_{}'.format(n, m, offset),
          initial_type, selected_indices)


def _construct_permutation_tuple_collection(max_length):
  permutation_tuples = []
  for n in range(max_length):
    for m in range(n):
      for offset in range(n - m):
        permutation_tuples.append(_construct_permutation_tuple(n, m, offset))
  return permutation_tuples
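# E.g. _construct_permutation_tuple(3, 1, 0) yields the named parameterized
# case ('tuple_type_3_select_1_indices_offset_0', a 3-element StructType of
# abstract types T0..T2, [0]); the collection enumerates every (n, m, offset)
# combination with offset + m < n up to max_length.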
class RemapGraphInputsTest(test_case.TestCase, parameterized.TestCase):

  def test_raises_on_bad_computation(self):
    tuple_type = computation_types.StructType([tf.int32])
    bad_comp = building_blocks.Data('x', computation_types.AbstractType('T'))
    with self.assertRaises(TypeError):
      compiled_computation_transforms._remap_graph_inputs(
          bad_comp, [0], tuple_type)

  def test_raises_on_bad_type(self):
    tuple_type = computation_types.StructType([tf.int32])
    tuple_identity = building_block_factory.create_compiled_identity(tuple_type)
    tensor_type = computation_types.TensorType(tf.int32)
    with self.assertRaises(TypeError):
      compiled_computation_transforms._remap_graph_inputs(
          tuple_identity, [0], tensor_type)

  def test_raises_on_non_list_of_indices(self):
    tuple_type = computation_types.StructType([tf.int32])
    tuple_identity = building_block_factory.create_compiled_identity(tuple_type)
    with self.assertRaises(TypeError):
      compiled_computation_transforms._remap_graph_inputs(
          tuple_identity, 0, tuple_type)

  def test_raises_on_repeated_indices(self):
    tuple_type = computation_types.StructType([tf.int32, tf.int32])
    tuple_identity = building_block_factory.create_compiled_identity(tuple_type)
    with self.assertRaises(ValueError):
      compiled_computation_transforms._remap_graph_inputs(
          tuple_identity, [0, 0], tuple_type)

  def test_raises_on_bad_index(self):
    tuple_type = computation_types.StructType([tf.int32, tf.int32])
    tuple_identity = building_block_factory.create_compiled_identity(tuple_type)
    with self.assertRaises(ValueError):
      compiled_computation_transforms._remap_graph_inputs(
          tuple_identity, [-1, 0], tuple_type)

  def test_permute_and_pad_index_0_of_two_tuple(self):
    index_list = [0]
    tuple_type = computation_types.StructType([tf.float32, tf.int32])
    to_pad = compiled_computation_transforms._construct_padding(
        index_list, tuple_type)
    to_permute = compiled_computation_transforms._construct_permutation(
        index_list, tuple_type)
    result_of_applying_permutation = _simulate_permutation_behavior(
        to_pad, to_permute)
    self.assertEqual(to_pad, tuple_type)
    self.assertEqual(to_permute, [0, 1])
    self.assertEqual(result_of_applying_permutation, tuple_type)

  def test_permute_and_pad_index_1_of_two_tuple(self):
    index_list = [1]
    tuple_type = computation_types.StructType([tf.float32, tf.int32])
    to_pad = compiled_computation_transforms._construct_padding(
        index_list, tuple_type)
    to_permute = compiled_computation_transforms._construct_permutation(
        index_list, tuple_type)
    result_of_applying_permutation = _simulate_permutation_behavior(
        to_pad, to_permute)
    self.assertEqual(to_pad,
                     computation_types.StructType([tf.int32, tf.float32]))
    self.assertEqual(to_permute, [1, 0])
    self.assertEqual(result_of_applying_permutation, tuple_type)

  def test_permute_and_pad_identity_on_two_tuple(self):
    index_list = [0, 1]
    tuple_type = computation_types.StructType([tf.float32, tf.int32])
    to_pad = compiled_computation_transforms._construct_padding(
        index_list, tuple_type)
    to_permute = compiled_computation_transforms._construct_permutation(
        index_list, tuple_type)
    result_of_applying_permutation = _simulate_permutation_behavior(
        to_pad, to_permute)
    self.assertEqual(to_pad, tuple_type)
    self.assertEqual(to_permute, [0, 1])
    self.assertEqual(result_of_applying_permutation, tuple_type)

  def test_permute_and_pad_inversion_of_two_tuple(self):
    index_list = [1, 0]
    tuple_type = computation_types.StructType([tf.float32, tf.int32])
    to_pad = compiled_computation_transforms._construct_padding(
        index_list, tuple_type)
    to_permute = compiled_computation_transforms._construct_permutation(
        index_list, tuple_type)
    result_of_applying_permutation = _simulate_permutation_behavior(
        to_pad, to_permute)
    self.assertEqual(to_pad,
                     computation_types.StructType([tf.int32, tf.float32]))
    self.assertEqual(to_permute, [1, 0])
    self.assertEqual(result_of_applying_permutation, tuple_type)
def test_permute_and_pad_inversion_of_named_two_tuple(self):
index_list = [1, 0]
tuple_type = computation_types.StructType([('a', tf.float32),
('b', tf.int32)])
to_pad = compiled_computation_transforms._construct_padding(
index_list, tuple_type)
to_permute = compiled_computation_transforms._construct_permutation(
index_list, tuple_type)
result_of_applying_permutation = _simulate_permutation_behavior(
to_pad, to_permute)
self.assertEqual(
to_pad,
computation_types.StructType([('b', tf.int32), ('a', tf.float32)]))
self.assertEqual(to_permute, [1, 0])
self.assertEqual(result_of_applying_permutation, tuple_type)
def test_permute_and_pad_single_index_deep_in_tuple(self):
index_list = [5]
tuple_type_list = [tf.float32, tf.int32] * 5
tuple_type = computation_types.StructType(tuple_type_list)
to_pad = compiled_computation_transforms._construct_padding(
index_list, tuple_type)
to_permute = compiled_computation_transforms._construct_permutation(
index_list, tuple_type)
to_pad_first_type = tuple_type_list.pop(5)
tuple_type_list.insert(0, to_pad_first_type)
self.assertEqual(to_pad, computation_types.StructType(tuple_type_list))
self.assertEqual(to_permute, [1, 2, 3, 4, 5, 0, 6, 7, 8, 9])
result_of_applying_permutation = _simulate_permutation_behavior(
to_pad, to_permute)
self.assertEqual(result_of_applying_permutation, tuple_type)
@parameterized.named_parameters(*_construct_permutation_tuple_collection(5))
def test_permute_and_pad_round_trip(self, initial_type, selected_indices):
to_pad = compiled_computation_transforms._construct_padding(
selected_indices, initial_type)
to_permute = compiled_computation_transforms._construct_permutation(
selected_indices, initial_type)
result_of_applying_permutation = _simulate_permutation_behavior(
to_pad, to_permute)
self.assertEqual(result_of_applying_permutation, initial_type)
class ComposeTensorFlowBlocksTest(test_case.TestCase, parameterized.TestCase):
def test_raises_on_none(self):
with self.assertRaises(TypeError):
compiled_computation_transforms.compose_tensorflow_blocks(None)
def test_raises_on_single_computation(self):
tuple_type = computation_types.StructType([tf.int32, tf.float32])
identity = building_block_factory.create_compiled_identity(tuple_type)
with self.assertRaises(TypeError):
compiled_computation_transforms.compose_tensorflow_blocks(identity)
def test_raises_bad_arg_in_list(self):
tuple_type = computation_types.StructType([tf.int32, tf.float32])
identity = building_block_factory.create_compiled_identity(tuple_type)
with self.assertRaises(TypeError):
compiled_computation_transforms.compose_tensorflow_blocks([identity, 0])
def test_raises_mismatched_parameter_and_result_types(self):
tuple_type = computation_types.StructType([tf.int32, tf.float32])
identity = building_block_factory.create_compiled_identity(tuple_type)
bad_tuple_type = computation_types.StructType([tf.float32, tf.int32])
bad_identity = building_block_factory.create_compiled_identity(
bad_tuple_type)
with self.assertRaises(TypeError):
compiled_computation_transforms.compose_tensorflow_blocks(
[identity, bad_identity])
def test_composes_no_arg_fn_with_add_one_types_correctly(self):
tensor_type = computation_types.TensorType(tf.int32)
noarg_fn = building_block_factory.create_tensorflow_constant(tensor_type, 0)
add_one_fn = _create_compiled_computation(
lambda x: x + 1, computation_types.TensorType(tf.int32))
composed_fn = compiled_computation_transforms.compose_tensorflow_blocks(
[add_one_fn, noarg_fn.function])
expected_type = computation_types.FunctionType(None, tf.int32)
self.assertEqual(composed_fn.type_signature, expected_type)
def test_composes_no_arg_fn_with_add_one_executes_correctly(self):
tensor_type = computation_types.TensorType(tf.int32)
noarg_fn = building_block_factory.create_tensorflow_constant(tensor_type, 0)
add_one_fn = _create_compiled_computation(
lambda x: x + 1, computation_types.TensorType(tf.int32))
composed_fn = compiled_computation_transforms.compose_tensorflow_blocks(
[add_one_fn, noarg_fn.function])
result = compiler_test_utils.run_tensorflow(composed_fn.proto)
self.assertEqual(result, 1)
def test_composes_tensor_functions_types_correctly(self):
int_to_float_fn = _create_compiled_computation(
lambda x: tf.cast(x, tf.float32) * 2.0,
computation_types.TensorType(tf.int32))
float_to_float_fn = _create_compiled_computation(
lambda x: x * 2.0, computation_types.TensorType(tf.float32))
composed_fn = compiled_computation_transforms.compose_tensorflow_blocks(
[float_to_float_fn, int_to_float_fn])
expected_type = computation_types.FunctionType(tf.int32, tf.float32)
self.assertEqual(composed_fn.type_signature, expected_type)
def test_composes_tensor_function_executes_correctly(self):
int_to_float_fn = _create_compiled_computation(
lambda x: tf.cast(x, tf.float32) * 2.0,
computation_types.TensorType(tf.int32))
float_to_float_fn = _create_compiled_computation(
lambda x: x * 2.0, computation_types.TensorType(tf.float32))
composed_fn = compiled_computation_transforms.compose_tensorflow_blocks(
[float_to_float_fn, int_to_float_fn])
for k in range(5):
result = compiler_test_utils.run_tensorflow(composed_fn.proto, k)
self.assertEqual(result, k * 4.0)
def test_compose_integer_identities_executes_correctly(self):
tensor_type = computation_types.TensorType(tf.int32)
identity = building_block_factory.create_compiled_identity(tensor_type)
composed = compiled_computation_transforms.compose_tensorflow_blocks(
[identity, identity])
result = compiler_test_utils.run_tensorflow(composed.proto, 0)
self.assertEqual(result, 0)
def test_composes_unnamed_tuple_functions_types_correctly(self):
int_float_flip = _create_compiled_computation(
lambda x: [x[1], x[0]],
computation_types.StructType([tf.int32, tf.float32]))
float_int_flip = _create_compiled_computation(
lambda x: [x[1], x[0]],
computation_types.StructType([tf.float32, tf.int32]))
composed_fn_float_int = compiled_computation_transforms.compose_tensorflow_blocks(
[int_float_flip, float_int_flip])
composed_fn_int_float = compiled_computation_transforms.compose_tensorflow_blocks(
[float_int_flip, int_float_flip])
expected_type_int_float = computation_types.FunctionType(
[tf.int32, tf.float32], [tf.int32, tf.float32])
expected_type_float_int = computation_types.FunctionType(
[tf.float32, tf.int32], [tf.float32, tf.int32])
# TODO(b/157172423): change to assertEqual when Py container is preserved.
composed_fn_float_int.type_signature.check_equivalent_to(
expected_type_float_int)
composed_fn_int_float.type_signature.check_equivalent_to(
expected_type_int_float)
def test_composes_unnamed_tuple_functions_executes_correctly(self):
int_float_flip = _create_compiled_computation(
lambda x: [x[1], x[0]],
computation_types.StructType([tf.int32, tf.float32]))
float_int_flip = _create_compiled_computation(
lambda x: [x[1], x[0]],
computation_types.StructType([tf.float32, tf.int32]))
composed_fn_float_int = compiled_computation_transforms.compose_tensorflow_blocks(
[int_float_flip, float_int_flip])
result = compiler_test_utils.run_tensorflow(composed_fn_float_int.proto,
[10.0, 0])
self.assertEqual(result[0], 10.0)
self.assertEqual(result[1], 0)
self.assertLen(result, 2)
composed_fn_int_float = compiled_computation_transforms.compose_tensorflow_blocks(
[float_int_flip, int_float_flip])
result = compiler_test_utils.run_tensorflow(composed_fn_int_float.proto,
[10, 0.0])
self.assertEqual(result[0], 10)
self.assertEqual(result[1], 0.0)
self.assertLen(result, 2)
def test_composes_named_tuple_function_with_unnamed_tuple_function_types_correctly(
self):
drop_names = _create_compiled_computation(
lambda x: [x[0], x[1]],
computation_types.StructType([('a', tf.int32), ('b', tf.float32)]))
    unnamed_types = computation_types.StructType([tf.int32, tf.float32])
    unnamed_identity = building_block_factory.create_compiled_identity(
        unnamed_types)
composed = compiled_computation_transforms.compose_tensorflow_blocks(
[unnamed_identity, drop_names])
expected_type = computation_types.FunctionType([('a', tf.int32),
('b', tf.float32)],
[tf.int32, tf.float32])
# TODO(b/157172423): change to assertEqual when Py container is preserved.
composed.type_signature.check_equivalent_to(expected_type)
def test_composes_named_tuple_function_with_unnamed_tuple_function_executes_correctly(
self):
drop_names = _create_compiled_computation(
lambda x: [x[0], x[1]],
computation_types.StructType([('a', tf.int32), ('b', tf.float32)]))
    unnamed_types = computation_types.StructType([tf.int32, tf.float32])
    unnamed_identity = building_block_factory.create_compiled_identity(
        unnamed_types)
composed = compiled_computation_transforms.compose_tensorflow_blocks(
[unnamed_identity, drop_names])
result = compiler_test_utils.run_tensorflow(composed.proto, {
'a': 0,
'b': 1.0
})
self.assertEqual(result[0], 0)
self.assertEqual(result[1], 1.0)
self.assertLen(result, 2)
def test_composes_named_tuple_functions_types_correctly(self):
flip_order = _create_compiled_computation(
lambda x: collections.OrderedDict([('b', x.b), ('a', x.a)]),
computation_types.StructType([('a', tf.int32), ('b', tf.float32)]))
identity = _create_compiled_computation(
lambda x: collections.OrderedDict([('b', x.b), ('a', x.a)]),
computation_types.StructType([('b', tf.float32), ('a', tf.int32)]))
composed = compiled_computation_transforms.compose_tensorflow_blocks(
[identity, flip_order])
expected_type = computation_types.FunctionType([('a', tf.int32),
('b', tf.float32)],
[('b', tf.float32),
('a', tf.int32)])
# TODO(b/157172423): change to assertEqual when Py container is preserved.
composed.type_signature.check_equivalent_to(expected_type)
def test_composes_named_tuple_functions_executes_correctly(self):
flip_order = _create_compiled_computation(
lambda x: collections.OrderedDict([('b', x.b), ('a', x.a)]),
computation_types.StructType([('a', tf.int32), ('b', tf.float32)]))
identity = _create_compiled_computation(
lambda x: collections.OrderedDict([('b', x.b), ('a', x.a)]),
computation_types.StructType([('b', tf.float32), ('a', tf.int32)]))
composed = compiled_computation_transforms.compose_tensorflow_blocks(
[identity, flip_order])
result = compiler_test_utils.run_tensorflow(
composed.proto, collections.OrderedDict({
'a': 0,
'b': 1.0,
}))
self.assertEqual(result[0], 1.0)
self.assertEqual(result[1], 0)
self.assertLen(result, 2)
def test_composes_sequence_functions_types_correctly(self):
reduce_ds = _create_compiled_computation(
lambda ds: ds.reduce(tf.constant(0, tf.int64), lambda x, y: x + y),
computation_types.SequenceType(tf.int64))
produce_ds = _create_compiled_computation(lambda: tf.data.Dataset.range(5),
None)
integer_result = compiled_computation_transforms.compose_tensorflow_blocks(
[reduce_ds, produce_ds])
self.assertEqual(integer_result.type_signature,
computation_types.FunctionType(None, tf.int64))
def test_composes_sequence_functions_executes_correctly(self):
reduce_ds = _create_compiled_computation(
lambda ds: ds.reduce(tf.constant(0, tf.int64), lambda x, y: x + y),
computation_types.SequenceType(tf.int64))
produce_ds = _create_compiled_computation(lambda: tf.data.Dataset.range(5),
None)
integer_result = compiled_computation_transforms.compose_tensorflow_blocks(
[reduce_ds, produce_ds])
result = compiler_test_utils.run_tensorflow(integer_result.proto)
self.assertEqual(result, 10)
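# Builds the pattern `add_one(zero())`: a called TF graph whose argument is
# itself the result of calling another compiled TF graph, i.e. a composition
# of two TF blocks.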
def _create_simple_called_composition_of_tf_blocks():
tensor_type = computation_types.TensorType(tf.int32)
zero = building_block_factory.create_tensorflow_constant(tensor_type, 0)
add_one = _create_compiled_computation(lambda x: x + 1,
computation_types.TensorType(tf.int32))
one = building_blocks.Call(add_one, zero)
return one
class CalledCompositionOfTensorFlowBlocksTest(test_case.TestCase,
parameterized.TestCase):
def test_should_transform_identifies_correct_pattern(self):
pattern = _create_simple_called_composition_of_tf_blocks()
logic = compiled_computation_transforms.CalledCompositionOfTensorFlowBlocks(
)
self.assertTrue(logic.should_transform(pattern))
def test_should_not_transform_compiled_computation(self):
integer_square = _create_compiled_computation(
lambda x: x * x, computation_types.TensorType(tf.int32))
logic = compiled_computation_transforms.CalledCompositionOfTensorFlowBlocks(
)
self.assertFalse(logic.should_transform(integer_square))
def test_should_not_transform_single_called_compiled_computation(self):
integer_square = _create_compiled_computation(
lambda x: x * x, computation_types.TensorType(tf.int32))
int_ref = building_blocks.Reference('x', tf.int32)
called_square = building_blocks.Call(integer_square, int_ref)
logic = compiled_computation_transforms.CalledCompositionOfTensorFlowBlocks(
)
self.assertFalse(logic.should_transform(called_square))
def test_should_not_transform_called_lambda_on_called_compiled_computation(
self):
integer_square = _create_compiled_computation(
lambda x: x * x, computation_types.TensorType(tf.int32))
int_ref = building_blocks.Reference('x', tf.int32)
called_square = building_blocks.Call(integer_square, int_ref)
lambda_wrapper = building_blocks.Lambda('x', tf.int32, called_square)
outer_int_ref = building_blocks.Reference('y', tf.int32)
called_lambda = building_blocks.Call(lambda_wrapper, outer_int_ref)
logic = compiled_computation_transforms.CalledCompositionOfTensorFlowBlocks(
)
self.assertFalse(logic.should_transform(called_lambda))
def test_does_not_transform_compiled_computation(self):
integer_square = _create_compiled_computation(
lambda x: x * x, computation_types.TensorType(tf.int32))
logic = compiled_computation_transforms.CalledCompositionOfTensorFlowBlocks(
)
parsed, mutated = logic.transform(integer_square)
self.assertEqual(parsed, integer_square)
self.assertFalse(mutated)
def test_transform_constructs_correct_root_node(self):
pattern = _create_simple_called_composition_of_tf_blocks()
logic = compiled_computation_transforms.CalledCompositionOfTensorFlowBlocks(
)
parsed, mutated = logic.transform(pattern)
self.assertIsInstance(parsed, building_blocks.Call)
self.assertIsInstance(parsed.function, building_blocks.CompiledComputation)
self.assertTrue(mutated)
def test_transform_reduces_number_of_compiled_computations(self):
pattern = _create_simple_called_composition_of_tf_blocks()
original_count = tree_analysis.count_types(
pattern, building_blocks.CompiledComputation)
logic = compiled_computation_transforms.CalledCompositionOfTensorFlowBlocks(
)
parsed, _ = logic.transform(pattern)
new_count = tree_analysis.count_types(parsed,
building_blocks.CompiledComputation)
self.assertLess(new_count, original_count)
def test_leaves_type_signature_alone(self):
pattern = _create_simple_called_composition_of_tf_blocks()
logic = compiled_computation_transforms.CalledCompositionOfTensorFlowBlocks(
)
parsed, mutated = logic.transform(pattern)
self.assertEqual(parsed.type_signature, pattern.type_signature)
self.assertTrue(mutated)
def test_executes_correctly(self):
pattern = _create_simple_called_composition_of_tf_blocks()
logic = compiled_computation_transforms.CalledCompositionOfTensorFlowBlocks(
)
parsed, _ = logic.transform(pattern)
result = compiler_test_utils.run_tensorflow(parsed.function.proto, 0)
self.assertEqual(result, 1)
result = compiler_test_utils.run_tensorflow(parsed.function.proto, 1)
self.assertEqual(result, 1)
result = compiler_test_utils.run_tensorflow(parsed.function.proto, 2)
self.assertEqual(result, 1)
def test_constructs_correct_type_signature_named_tuple_argument(self):
tuple_type = computation_types.StructType([('a', tf.int32),
('b', tf.float32)])
identity = building_block_factory.create_compiled_identity(tuple_type)
sel_int = _create_compiled_computation(
lambda x: x.a,
computation_types.StructType([('a', tf.int32), ('b', tf.float32)]))
tuple_reference = building_blocks.Reference('x', [('a', tf.int32),
('b', tf.float32)])
called_identity = building_blocks.Call(identity, tuple_reference)
called_integer_selection = building_blocks.Call(sel_int, called_identity)
logic = compiled_computation_transforms.CalledCompositionOfTensorFlowBlocks(
)
parsed, mutated = logic.transform(called_integer_selection)
self.assertEqual(parsed.type_signature,
called_integer_selection.type_signature)
self.assertEqual(parsed.argument.type_signature,
tuple_reference.type_signature)
self.assertTrue(mutated)
def test_executes_named_tuple_argument(self):
tuple_type = computation_types.StructType([('a', tf.int32),
('b', tf.float32)])
identity = building_block_factory.create_compiled_identity(tuple_type)
sel_int = _create_compiled_computation(
lambda x: x.a,
computation_types.StructType([('a', tf.int32), ('b', tf.float32)]))
tuple_reference = building_blocks.Reference('x', [('a', tf.int32),
('b', tf.float32)])
called_identity = building_blocks.Call(identity, tuple_reference)
called_integer_selection = building_blocks.Call(sel_int, called_identity)
logic = compiled_computation_transforms.CalledCompositionOfTensorFlowBlocks(
)
parsed, _ = logic.transform(called_integer_selection)
result = compiler_test_utils.run_tensorflow(parsed.function.proto, {
'a': 1,
'b': 0.0
})
self.assertEqual(result, 1)
result = compiler_test_utils.run_tensorflow(parsed.function.proto, {
'a': 0,
'b': 1.0
})
self.assertEqual(result, 0)
def test_constructs_correct_type_signature_named_tuple_result(self):
namer = _create_compiled_computation(
lambda x: collections.OrderedDict([('a', x[0]), ('b', x[1])]),
computation_types.StructType([tf.int32, tf.float32]))
tuple_type = computation_types.StructType([tf.int32, tf.float32])
identity = building_block_factory.create_compiled_identity(tuple_type)
tuple_reference = building_blocks.Reference('x', [tf.int32, tf.float32])
called_identity = building_blocks.Call(identity, tuple_reference)
called_namer = building_blocks.Call(namer, called_identity)
logic = compiled_computation_transforms.CalledCompositionOfTensorFlowBlocks(
)
parsed, mutated = logic.transform(called_namer)
self.assertEqual(parsed.type_signature, called_namer.type_signature)
self.assertTrue(mutated)
def test_executes_correctly_named_tuple_result(self):
namer = _create_compiled_computation(
lambda x: collections.OrderedDict([('a', x[0]), ('b', x[1])]),
computation_types.StructType([tf.int32, tf.float32]))
tuple_type = computation_types.StructType([tf.int32, tf.float32])
identity = building_block_factory.create_compiled_identity(tuple_type)
tuple_reference = building_blocks.Reference('x', [tf.int32, tf.float32])
called_identity = building_blocks.Call(identity, tuple_reference)
called_namer = building_blocks.Call(namer, called_identity)
logic = compiled_computation_transforms.CalledCompositionOfTensorFlowBlocks(
)
parsed, _ = logic.transform(called_namer)
result = compiler_test_utils.run_tensorflow(parsed.function.proto, [1, 0.0])
self.assertEqual(result[0], 1)
self.assertEqual(result.a, 1)
self.assertEqual(result[1], 0.)
self.assertEqual(result.b, 0.)
result = compiler_test_utils.run_tensorflow(parsed.function.proto, [0, 1.0])
self.assertEqual(result[0], 0)
self.assertEqual(result.a, 0)
self.assertEqual(result[1], 1.0)
self.assertEqual(result.b, 1.0)
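# Builds the pattern `identity(<x, x, ..., x>)`: a compiled TF graph called on
# a Struct that replicates a single integer reference `n_replicates` times.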
def _create_simple_called_graph_on_replicated_arg(n_replicates=2):
tuple_type = computation_types.StructType([tf.int32] * n_replicates)
tuple_identity = building_block_factory.create_compiled_identity(tuple_type)
ref_to_int = building_blocks.Reference('x', tf.int32)
called_tuple_id = building_blocks.Call(
tuple_identity, building_blocks.Struct([ref_to_int] * n_replicates))
return called_tuple_id
class CalledGraphOnReplicatedArgTest(test_case.TestCase):
def test_should_transform_identifies_correct_pattern(self):
pattern = _create_simple_called_graph_on_replicated_arg()
logic = compiled_computation_transforms.CalledGraphOnReplicatedArg()
self.assertTrue(logic.should_transform(pattern))
def test_should_transform_identifies_longer_pattern(self):
pattern = _create_simple_called_graph_on_replicated_arg(n_replicates=5)
logic = compiled_computation_transforms.CalledGraphOnReplicatedArg()
self.assertTrue(logic.should_transform(pattern))
def test_should_not_transform_compiled_computation(self):
integer_square = _create_compiled_computation(
lambda x: x * x, computation_types.TensorType(tf.int32))
logic = compiled_computation_transforms.CalledGraphOnReplicatedArg()
self.assertFalse(logic.should_transform(integer_square))
def test_should_not_transform_non_tuple_wrapped_lambda_to_called_graph(self):
integer_square = _create_compiled_computation(
lambda x: x * x, computation_types.TensorType(tf.int32))
int_ref = building_blocks.Reference('x', tf.int32)
called_square = building_blocks.Call(integer_square, int_ref)
logic = compiled_computation_transforms.CalledGraphOnReplicatedArg()
self.assertFalse(logic.should_transform(called_square))
def test_does_not_transform_compiled_computation(self):
integer_square = _create_compiled_computation(
lambda x: x * x, computation_types.TensorType(tf.int32))
logic = compiled_computation_transforms.CalledGraphOnReplicatedArg()
parsed, mutated = logic.transform(integer_square)
self.assertEqual(parsed, integer_square)
self.assertFalse(mutated)
def test_transform_constructs_correct_root_node(self):
pattern = _create_simple_called_graph_on_replicated_arg()
logic = compiled_computation_transforms.CalledGraphOnReplicatedArg()
parsed, mutated = logic.transform(pattern)
self.assertIsInstance(parsed, building_blocks.Call)
self.assertTrue(mutated)
def test_leaves_type_signature_alone(self):
pattern = _create_simple_called_graph_on_replicated_arg()
logic = compiled_computation_transforms.CalledGraphOnReplicatedArg()
parsed, mutated = logic.transform(pattern)
self.assertEqual(parsed.type_signature, pattern.type_signature)
self.assertTrue(mutated)
def test_executes_correctly_simple_case(self):
pattern = _create_simple_called_graph_on_replicated_arg()
logic = compiled_computation_transforms.CalledGraphOnReplicatedArg()
parsed, _ = logic.transform(pattern)
result = compiler_test_utils.run_tensorflow(parsed.function.proto, 0)
self.assertEqual(result, structure.Struct([(None, 0), (None, 0)]))
result = compiler_test_utils.run_tensorflow(parsed.function.proto, 1)
self.assertEqual(result, structure.Struct([(None, 1), (None, 1)]))
result = compiler_test_utils.run_tensorflow(parsed.function.proto, 2)
self.assertEqual(result, structure.Struct([(None, 2), (None, 2)]))
def test_executes_correctly_several_replicates(self):
pattern = _create_simple_called_graph_on_replicated_arg(n_replicates=5)
logic = compiled_computation_transforms.CalledGraphOnReplicatedArg()
parsed, _ = logic.transform(pattern)
result = compiler_test_utils.run_tensorflow(parsed.function.proto, 0)
for k in range(5):
self.assertEqual(result[k], 0)
self.assertLen(result, 5)
result = compiler_test_utils.run_tensorflow(parsed.function.proto, 1)
for k in range(5):
self.assertEqual(result[k], 1)
self.assertLen(result, 5)
def test_constructs_correct_type_signature_nested_tuple_argument(self):
slicer = _create_compiled_computation(
lambda x: [x[0][0], x[1][1]],
computation_types.StructType([[tf.int32, tf.float32],
[tf.int32, tf.float32]]))
tuple_reference = building_blocks.Reference('x', [tf.int32, tf.float32])
called_slicer = building_blocks.Call(
slicer, building_blocks.Struct([tuple_reference, tuple_reference]))
logic = compiled_computation_transforms.CalledGraphOnReplicatedArg()
parsed, mutated = logic.transform(called_slicer)
self.assertEqual(parsed.type_signature, called_slicer.type_signature)
self.assertTrue(mutated)
def test_constructs_correct_type_signature_nested_named_tuple_argument(self):
slicer = _create_compiled_computation(
lambda x: [x[0][0], x[1][1]],
computation_types.StructType([[('a', tf.int32), ('b', tf.float32)],
[('a', tf.int32), ('b', tf.float32)]]))
tuple_reference = building_blocks.Reference('x', [('a', tf.int32),
('b', tf.float32)])
called_slicer = building_blocks.Call(
slicer, building_blocks.Struct([tuple_reference, tuple_reference]))
logic = compiled_computation_transforms.CalledGraphOnReplicatedArg()
parsed, mutated = logic.transform(called_slicer)
self.assertEqual(parsed.type_signature, called_slicer.type_signature)
self.assertTrue(mutated)
def test_execution_nested_tuple_argument(self):
slicer = _create_compiled_computation(
lambda x: [x[0][0], x[1][1]],
computation_types.StructType([[tf.int32, tf.float32],
[tf.int32, tf.float32]]))
tuple_reference = building_blocks.Reference('x', [tf.int32, tf.float32])
called_slicer = building_blocks.Call(
slicer, building_blocks.Struct([tuple_reference, tuple_reference]))
logic = compiled_computation_transforms.CalledGraphOnReplicatedArg()
parsed, _ = logic.transform(called_slicer)
result = compiler_test_utils.run_tensorflow(parsed.function.proto, [0, 1.0])
self.assertEqual(result[0], 0)
self.assertEqual(result[1], 1.0)
result = compiler_test_utils.run_tensorflow(parsed.function.proto, [1, 0.0])
self.assertEqual(result[0], 1)
self.assertEqual(result[1], 0.)
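# Builds a Lambda that ignores its float32 parameter and returns the result of
# calling a no-argument TF graph producing the integer constant 0.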
def _create_simple_lambda_wrapping_noarg_graph():
embedded_type = computation_types.TensorType(tf.int32)
embedded_constant = building_block_factory.create_tensorflow_constant(
embedded_type, 0)
return building_blocks.Lambda('x', tf.float32, embedded_constant)
class LambdaWrappingNoArgGraphTest(test_case.TestCase, parameterized.TestCase):
def test_should_transform_identifies_correct_pattern(self):
pattern = _create_simple_lambda_wrapping_noarg_graph()
logic = compiled_computation_transforms.LambdaWrappingNoArgGraph()
self.assertTrue(logic.should_transform(pattern))
def test_should_transform_does_not_identify_lambda_to_graph_with_arg(self):
pattern = _create_simple_lambda_wrapping_graph()
logic = compiled_computation_transforms.LambdaWrappingNoArgGraph()
self.assertFalse(logic.should_transform(pattern))
def test_transform_leaves_type_signature_untouched(self):
pattern = _create_simple_lambda_wrapping_noarg_graph()
logic = compiled_computation_transforms.LambdaWrappingNoArgGraph()
parsed, _ = logic.transform(pattern)
self.assertEqual(parsed.type_signature, pattern.type_signature)
def test_transform_constructs_correct_root_node(self):
pattern = _create_simple_lambda_wrapping_noarg_graph()
logic = compiled_computation_transforms.LambdaWrappingNoArgGraph()
parsed, _ = logic.transform(pattern)
self.assertIsInstance(parsed, building_blocks.CompiledComputation)
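  # Builds a TF graph containing a variable (so the serialized computation
  # carries an initialize_op), wraps the resulting no-arg computation in a
  # lambda, and checks that the transform preserves the initializer,
  # re-scoped under a new name, in the parsed graph.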
def test_updates_init_op(self):
with tf.Graph().as_default() as graph:
var = tf.Variable(initial_value=0.0, name='var1', import_scope='')
assign_op = var.assign_add(tf.constant(1.0))
out = tf.add(1.0, assign_op)
init_op_name = tf.compat.v1.global_variables_initializer().name
result_type, result_binding = tensorflow_utils.capture_result_from_graph(
out, graph)
type_spec = computation_types.FunctionType(None, result_type)
serialized_type_spec = type_serialization.serialize_type(type_spec)
proto_with_init_op = pb.TensorFlow(
graph_def=serialization_utils.pack_graph_def(graph.as_graph_def()),
initialize_op=init_op_name,
result=result_binding)
constant_with_init_op = building_blocks.Call(
building_blocks.CompiledComputation(
pb.Computation(
type=serialized_type_spec, tensorflow=proto_with_init_op)),
None)
lambda_wrapping_constant = building_blocks.Lambda('x', tf.float32,
constant_with_init_op)
logic = compiled_computation_transforms.LambdaWrappingNoArgGraph()
parsed, transformed = logic.transform(lambda_wrapping_constant)
self.assertTrue(transformed)
split_init_op_name = parsed.proto.tensorflow.initialize_op.split('/')
self.assertNotEmpty(split_init_op_name[0])
self.assertEqual(split_init_op_name[1], init_op_name)
@parameterized.named_parameters([(str(n), n * 1.0) for n in range(10)])
def test_function_returned_independent_of_argument(self, arg):
pattern = _create_simple_lambda_wrapping_noarg_graph()
logic = compiled_computation_transforms.LambdaWrappingNoArgGraph()
parsed, _ = logic.transform(pattern)
result = compiler_test_utils.run_tensorflow(parsed.proto, arg)
self.assertEqual(result, 0)
class TensorFlowOptimizerTest(test_case.TestCase):
def test_should_transform_compiled_computation(self):
tuple_type = computation_types.TensorType(tf.int32)
compiled_computation = building_block_factory.create_compiled_identity(
tuple_type)
config = tf.compat.v1.ConfigProto()
tf_optimizer = compiled_computation_transforms.TensorFlowOptimizer(config)
self.assertTrue(tf_optimizer.should_transform(compiled_computation))
def test_should_not_transform_reference(self):
reference = building_blocks.Reference('x', tf.int32)
config = tf.compat.v1.ConfigProto()
tf_optimizer = compiled_computation_transforms.TensorFlowOptimizer(config)
self.assertFalse(tf_optimizer.should_transform(reference))
def test_transform_compiled_computation_returns_compiled_computation(self):
tuple_type = computation_types.TensorType(tf.int32)
compiled_computation = building_block_factory.create_compiled_identity(
tuple_type)
config = tf.compat.v1.ConfigProto()
tf_optimizer = compiled_computation_transforms.TensorFlowOptimizer(config)
transformed_comp, mutated = tf_optimizer.transform(compiled_computation)
self.assertTrue(mutated)
self.assertIsInstance(transformed_comp, building_blocks.CompiledComputation)
self.assertTrue(transformed_comp.proto.tensorflow.HasField('parameter'))
self.assertFalse(transformed_comp.proto.tensorflow.initialize_op)
def test_transform_compiled_computation_returns_compiled_computation_without_empty_fields(
self):
compiled_computation = building_block_factory.create_compiled_no_arg_empty_tuple_computation(
)
config = tf.compat.v1.ConfigProto()
tf_optimizer = compiled_computation_transforms.TensorFlowOptimizer(config)
transformed_comp, mutated = tf_optimizer.transform(compiled_computation)
self.assertTrue(mutated)
self.assertIsInstance(transformed_comp, building_blocks.CompiledComputation)
self.assertFalse(transformed_comp.proto.tensorflow.HasField('parameter'))
self.assertFalse(transformed_comp.proto.tensorflow.initialize_op)
def test_transform_compiled_computation_semantic_equivalence(self):
tuple_type = computation_types.TensorType(tf.int32)
compiled_computation = building_block_factory.create_compiled_identity(
tuple_type)
config = tf.compat.v1.ConfigProto()
tf_optimizer = compiled_computation_transforms.TensorFlowOptimizer(config)
transformed_comp, mutated = tf_optimizer.transform(compiled_computation)
self.assertTrue(mutated)
self.assertIsInstance(transformed_comp, building_blocks.CompiledComputation)
zero_before_transform = compiler_test_utils.run_tensorflow(
compiled_computation.proto, 0)
zero_after_transform = compiler_test_utils.run_tensorflow(
transformed_comp.proto, 0)
self.assertEqual(zero_before_transform, zero_after_transform)
class AddUniqueIDsTest(test_case.TestCase):
def test_should_transform_compiled_tf_computation(self):
tuple_type = computation_types.TensorType(tf.int32)
compiled_computation = building_block_factory.create_compiled_identity(
tuple_type)
self.assertTrue(
compiled_computation_transforms.AddUniqueIDs().should_transform(
compiled_computation))
def test_should_not_transform_non_compiled_computations(self):
reference = building_blocks.Reference('x', tf.int32)
self.assertFalse(
compiled_computation_transforms.AddUniqueIDs().should_transform(
reference))
def test_transform_compiled_computation_returns_compiled_computation_with_id(
self):
tuple_type = computation_types.TensorType(tf.int32)
compiled_computation = building_block_factory.create_compiled_identity(
tuple_type)
add_ids = compiled_computation_transforms.AddUniqueIDs()
with self.subTest('first_comp_non_zero_id'):
first_transformed_comp, mutated = add_ids.transform(compiled_computation)
self.assertTrue(mutated)
self.assertIsInstance(first_transformed_comp,
building_blocks.CompiledComputation)
self.assertTrue(first_transformed_comp.proto.tensorflow.HasField('id'))
self.assertNotEqual(first_transformed_comp.proto.tensorflow.id, 0)
with self.subTest('second_comp_same_id'):
second_transformed_comp, mutated = add_ids.transform(compiled_computation)
self.assertTrue(mutated)
self.assertIsInstance(second_transformed_comp,
building_blocks.CompiledComputation)
self.assertTrue(second_transformed_comp.proto.tensorflow.HasField('id'))
self.assertNotEqual(second_transformed_comp.proto.tensorflow.id, 0)
self.assertEqual(first_transformed_comp.proto.tensorflow.id,
second_transformed_comp.proto.tensorflow.id)
with self.subTest('restart_transformation_same_id'):
# Test that the sequence ids are the same if we run a new compiler pass.
# With compiler running inside the `invoke` call, we need to ensure
# running different computations doesn't produce the same ids.
add_ids = compiled_computation_transforms.AddUniqueIDs()
third_transformed_comp, mutated = add_ids.transform(compiled_computation)
self.assertTrue(mutated)
self.assertTrue(third_transformed_comp.proto.tensorflow.HasField('id'))
self.assertNotEqual(third_transformed_comp.proto.tensorflow.id, 0)
self.assertEqual(first_transformed_comp.proto.tensorflow.id,
third_transformed_comp.proto.tensorflow.id)
with self.subTest('different_computation_different_id'):
different_compiled_computation = _create_compiled_computation(
lambda x: x + tf.constant(1.0),
computation_types.TensorType(tf.float32))
different_transformed_comp, mutated = add_ids.transform(
different_compiled_computation)
self.assertTrue(mutated)
self.assertTrue(
different_transformed_comp.proto.tensorflow.HasField('id'))
self.assertNotEqual(different_transformed_comp.proto.tensorflow.id, 0)
self.assertNotEqual(first_transformed_comp.proto.tensorflow.id,
different_transformed_comp.proto.tensorflow.id)
if __name__ == '__main__':
test_case.main()
67e5c30e0422e1ae25dba42ae5fbad007bf26145 | 26 | py | Python | web/photos/__init__.py | wabscale/tandon.singles | ["MIT"]
from .routes import photos
67f0f16a55799448c3e5fedb6c8e4e86124abcac | 9,119 | py | Python | src/utils/log.py | GoDa-Choe/capstone_design | ["Apache-2.0"]
import datetime
from pathlib import Path
from src.dataset.category import CATEGORY
from src.utils.project_root import PROJECT_ROOT
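# Console/file logging helpers for training and evaluation runs. Most helpers
# accept "result" tuples of accumulated losses and counts, format them as
# space-separated columns, and append per-category accuracies using the class
# names from CATEGORY. `blue` wraps text in ANSI escape codes so validation
# columns stand out in the terminal.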
def blue(text):
return '\033[94m' + text + '\033[0m'
def logging_for_cd_test(validation_result):
def log_line(loss, batch_index):
return f"{loss / batch_index * 10_000:.6f}"
validation_log = log_line(*validation_result)
print(blue(validation_log))
def logging_for_cd_train(file, epoch, train_result, validation_result):
def log_line(loss, batch_index):
return f"{loss / batch_index * 10_000:.6f}"
train_log = log_line(*train_result)
validation_log = log_line(*validation_result)
print(epoch, train_log, blue(validation_log))
if file:
log = f"{epoch} {train_log} {validation_log}\n"
file.write(log)
def logging_for_test(test_result):
def log_line(loss, batch_index, correct, count):
return f"{loss / batch_index:.6f} {correct / count:.6f}"
def category_log_line(category_correct, category_count):
log = ""
for i in range(len(category_correct)):
            if category_count[i] == 0:  # avoid ZeroDivisionError: reduced MVP12 can yield empty categories
log += f"None "
else:
log += f"{category_correct[i] / category_count[i]:.2f} "
return log
def category_log_line_for_monitor(category_correct, category_count):
log = ""
for i in range(len(category_correct)):
            if category_count[i] == 0:  # avoid ZeroDivisionError: reduced MVP12 can yield empty categories
log += f"{CATEGORY[i]}-None "
else:
log += f"{CATEGORY[i]}-{category_correct[i] / category_count[i]:.2f} "
return log
total_test_result = test_result[:4]
category_test_result = test_result[4:]
total_test_log = log_line(*total_test_result)
category_test_log = category_log_line(*category_test_result)
category_test_log_for_monitor = category_log_line_for_monitor(*category_test_result)
print(blue(total_test_log), category_test_log_for_monitor)
print(category_test_log)
def logging_for_train(file, epoch, train_result, validation_result):
def log_line(loss, batch_index, correct, count):
return f"{loss / batch_index:.6f} {correct / count:.6f}"
def category_log_line_for_monitor(category_correct, category_count):
log = ""
for i in range(len(category_correct)):
            if category_count[i] == 0:  # avoid ZeroDivisionError: reduced MVP12 can yield empty categories
log += f"{CATEGORY[i]}-None "
else:
log += f"{CATEGORY[i]}-{category_correct[i] / category_count[i]:.2f} "
return log
def category_log_line(category_correct, category_count):
log = ""
for i in range(len(category_correct)):
            if category_count[i] == 0:  # avoid ZeroDivisionError: reduced MVP12 can yield empty categories
log += f"None "
else:
log += f"{category_correct[i] / category_count[i]:.2f} "
return log
total_validation_result = validation_result[:4]
category_validation_result = validation_result[4:]
train_log = log_line(*train_result)
total_test_log = log_line(*total_validation_result)
category_test_log = category_log_line(*category_validation_result)
category_test_log_for_monitor = category_log_line_for_monitor(*category_validation_result)
print(epoch, train_log, blue(total_test_log), category_test_log_for_monitor)
if file:
log = f"{epoch} {train_log} {total_test_log} {category_test_log}\n"
file.write(log)
def logging(file, epoch, train_result, test_result):
def log_line(loss, correct, count):
return f"{loss / count:.6f} {correct / count:.6f}"
def category_log_line_for_monitor(category_correct, category_count):
log = ""
for i in range(len(category_correct)):
            if category_count[i] == 0:  # avoid ZeroDivisionError: reduced MVP12 can yield empty categories
log += f"{CATEGORY[i]}-{0:.2f} "
else:
log += f"{CATEGORY[i]}-{category_correct[i] / category_count[i]:.2f} "
return log
def category_log_line(category_correct, category_count):
log = ""
for i in range(len(category_correct)):
            if category_count[i] == 0:  # avoid ZeroDivisionError: reduced MVP12 can yield empty categories
log += f"{0:.2f} "
else:
log += f"{category_correct[i] / category_count[i]:.2f} "
return log
total_test_result = test_result[:4]
category_test_result = test_result[4:]
train_log = log_line(*train_result)
total_test_log = log_line(*total_test_result)
category_test_log = category_log_line(*category_test_result)
category_test_log_for_monitor = category_log_line_for_monitor(*category_test_result)
print(epoch, train_log, blue(total_test_log), category_test_log_for_monitor)
if file:
log = f"{epoch} {train_log} {total_test_log} {category_test_log}\n"
file.write(log)
def get_log_file(experiment_type: str, dataset_type: str, train_shape: str, validation_shape: str = None,
test_shape=None):
now = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
directory = PROJECT_ROOT / 'result' / experiment_type / dataset_type
if experiment_type == "train":
file_name = f"{train_shape}_{now}.txt"
start_log = f"The {experiment_type.capitalize()} Experiment for {train_shape.capitalize()} is started at {now}."
else: # experiment_type == "test"
file_name = f"{train_shape}_{test_shape}_{now}.txt"
start_log = f"The {experiment_type.capitalize()} Experiment from {train_shape.capitalize()} to {test_shape.capitalize()} is started at {now}."
print(start_log)
file = open(directory / file_name, "w")
if experiment_type == "train":
index = f"Epoch Train_Loss Train_Accuracy Validation_Loss Validation_Accuracy\n"
else: # experiment_type == "test"
index = f"Test_Loss Test_Accuracy\n"
file.write(index)
print(index, end="")
return file
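# Illustrative usage sketch for the helpers above (the names `model`,
# `train_one_epoch`, `validate` and `num_epochs` are hypothetical and not
# defined in this project):
#
#     file = get_log_file("train", dataset_type, train_shape)
#     for epoch in range(num_epochs):
#         train_result = train_one_epoch(model)
#         validation_result = validate(model)
#         logging_for_train(file, epoch, train_result, validation_result)
#     file.close()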
def get_log_for_auto_encoder(dataset_type: str, loss_type="ce"):
now = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
directory = PROJECT_ROOT / 'result/train/' / dataset_type
file_name = f"{loss_type}_{now}.txt"
start_log = f"The {loss_type.capitalize()} Experiment for is started at {now}."
print(start_log)
file = open(directory / file_name, "w")
if loss_type == "ce":
index = f"Epoch Train_CE Train_Accuracy Validation_CE Validation_Accuracy\n"
file.write(index)
print(index, end="")
    elif loss_type == 'cd':
        index = "Epoch Train_CD Validation_CD\n"
        file.write(index)
        print(index, end="")
return file
def get_log_for_CE_CD(dataset_type: str, loss_type="ce_cd"):
now = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
directory = PROJECT_ROOT / 'result/train/' / dataset_type
file_name = f"{loss_type}_{now}.txt"
start_log = f"The {loss_type.capitalize()} Experiment for is started at {now}."
print(start_log)
file = open(directory / file_name, "w")
index = f"Epoch Train_CE_CD Train_CE Train_Accuracy Train_CD Validation_CE_CD Validation_CE Validation_Accuracy Validation_CD\n"
file.write(index)
print(index, end="")
return file
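# `train_result` and `validation_result` below are expected to be 10-tuples
# laid out as (cd_ce_loss, batch_index, ce_loss, batch_index, correct, count,
# category_correct, category_count, cd_loss, batch_index), matching the
# slicing in the inner `log` helper.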
def logging_for_CD_CE(file, epoch, train_result, validation_result):
def log_line_CD_CE(loss, batch_index):
return f"{loss / batch_index:.6f} "
def log_line_CD(loss, batch_index):
return f"{loss / batch_index * 10_000:.6f} "
def log_line(loss, batch_index, correct, count):
return f"{loss / batch_index:.6f} {correct / count:.6f} "
def category_log_line_for_monitor(category_correct, category_count):
log = ""
for i in range(len(category_correct)):
            if category_count[i] == 0:  # avoid ZeroDivisionError: reduced MVP12 can yield empty categories
log += f"{CATEGORY[i]}-None "
else:
log += f"{CATEGORY[i]}-{category_correct[i] / category_count[i]:.2f} "
return log
def category_log_line(category_correct, category_count):
log = "# "
for i in range(len(category_correct)):
            if category_count[i] == 0:  # avoid ZeroDivisionError: reduced MVP12 can yield empty categories
log += f"None "
else:
log += f"{category_correct[i] / category_count[i]:.2f} "
return log + "# "
def log(result):
CD_CE = result[:2]
CE = result[2:6]
CE_category = result[6:8]
CD = result[8:]
CD_CE_log = log_line_CD_CE(*CD_CE)
CE_log = log_line(*CE)
CE_category_log = category_log_line(*CE_category)
CD_log = log_line_CD(*CD)
log = CD_CE_log + CE_log + CE_category_log + CD_log
return log
train_log = log(train_result)
validation_log = log(validation_result)
print(epoch, train_log, blue(validation_log))
if file:
log = f"{epoch} {train_log} {validation_log}\n"
file.write(log)
db195d437aa333e93edddefed3ba0a863a1872b8 | 4,284 | py | Python | event_processor_test.py | gabriel-aranha/event-processor | ["MIT"]
import pytest
import event_processor
import json_schema
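# The validation schemas come from json_schema.py (not shown here). From the
# assertions below, the base message schema requires string-typed "id",
# "client_id" and "event_type" fields plus a "payload" object. A plausible
# sketch of what get_base_message_schema() returns (illustrative only, not
# the actual module contents):
#
#     {
#         "type": "object",
#         "required": ["id", "client_id", "event_type", "payload"],
#         "properties": {
#             "id": {"type": "string"},
#             "client_id": {"type": "string"},
#             "event_type": {"type": "string"},
#             "payload": {"type": "object"},
#         },
#     }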
def test_validate_json_base_schema_correct():
json_message = {
"id": "d19f29a0-9869-4bee-8651-8927520f2b6b",
"client_id": "4414c8f0-b645-4bc5-9d78-1375c7ea159a",
"event_type": "issue-credit-card",
"payload": {
"credit_limit": 2400,
"processor": "MasterCard"
}
}
base_validation_schema = json_schema.get_base_message_schema()
base_result = event_processor.validate_json_message_schema(
json_message, base_validation_schema)
assert base_result == {'status': 'ok'}
def test_validate_json_base_schema_missing_key():
json_message = {
"id": "d19f29a0-9869-4bee-8651-8927520f2b6b",
"client_id": "4414c8f0-b645-4bc5-9d78-1375c7ea159a",
"payload": {
"credit_limit": 2400,
"processor": "MasterCard"
}
}
base_validation_schema = json_schema.get_base_message_schema()
base_result = event_processor.validate_json_message_schema(
json_message, base_validation_schema)
assert base_result['error']['message'] == "'event_type' is a required property"
def test_validate_json_base_schema_wrong_type():
json_message = {
"id": "d19f29a0-9869-4bee-8651-8927520f2b6b",
"client_id": 555555555,
"event_type": "issue-credit-card",
"payload": {
"credit_limit": 2400,
"processor": "MasterCard"
}
}
base_validation_schema = json_schema.get_base_message_schema()
base_result = event_processor.validate_json_message_schema(
json_message, base_validation_schema)
assert base_result['error']['message'] == "555555555 is not of type 'string'"
def test_validate_json_payload_schema_correct_event():
json_message = {
"id": "d19f29a0-9869-4bee-8651-8927520f2b6b",
"client_id": "4414c8f0-b645-4bc5-9d78-1375c7ea159a",
"event_type": "issue-credit-card",
"payload": {
"credit_limit": 2400,
"processor": "MasterCard"
}
}
payload_validation_schema = json_schema.get_event_type_schema(
'issue-credit-card')
payload_result = event_processor.validate_json_message_schema(
json_message['payload'], payload_validation_schema)
assert payload_result == {'status': 'ok'}
def test_validate_json_payload_schema_wrong_event():
json_message = {
"id": "d19f29a0-9869-4bee-8651-8927520f2b6b",
"client_id": "4414c8f0-b645-4bc5-9d78-1375c7ea159a",
"event_type": "issue-credit-card",
"payload": {
"credit_limit": 2400,
"processor": "MasterCard"
}
}
payload_validation_schema = json_schema.get_event_type_schema(
'transaction')
payload_result = event_processor.validate_json_message_schema(
json_message['payload'], payload_validation_schema)
assert payload_result['error']['message'] == "'amount' is a required property"
def test_validate_json_payload_schema_missing_key():
json_message = {
"id": "d19f29a0-9869-4bee-8651-8927520f2b6b",
"client_id": "4414c8f0-b645-4bc5-9d78-1375c7ea159a",
"event_type": "issue-credit-card",
"payload": {
"processor": "MasterCard"
}
}
payload_validation_schema = json_schema.get_event_type_schema(
'issue-credit-card')
payload_result = event_processor.validate_json_message_schema(
json_message['payload'], payload_validation_schema)
assert payload_result['error']['message'] == "'credit_limit' is a required property"
def test_validate_json_payload_schema_wrong_type():
json_message = {
"id": "d19f29a0-9869-4bee-8651-8927520f2b6b",
"client_id": "4414c8f0-b645-4bc5-9d78-1375c7ea159a",
"event_type": "issue-credit-card",
"payload": {
"credit_limit": "2400",
"processor": "MasterCard"
}
}
payload_validation_schema = json_schema.get_event_type_schema(
'issue-credit-card')
payload_result = event_processor.validate_json_message_schema(
json_message['payload'], payload_validation_schema)
assert payload_result['error']['message'] == "'2400' is not of type 'number'"
e1fe3ba2870969045ef6ed9b31ef3cca44ddbf2f | 27 | py | Python | solvers/gpt3/lm_solve/__init__.py | tiendat101001/PythonProgrammingPuzzles | ["MIT"]
from lm_solve.run import *
c02675f5c7c6c6711ea95ffc77698db6cedb78f5 | 65 | py | Python | epidemioptim/optimization/__init__.py | hyerinshelly/RL_COVID-19_Korea | ["MIT"]
from epidemioptim.optimization.get_algorithm import get_algorithm
c02a49bd266066d8e377a321afc0ab0a46ac3674 | 26,747 | py | Python | sktime/classification/early_classification/base.py | biologioholic/sktime | ["BSD-3-Clause"]
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""
Abstract base class for early time series classifiers.
class name: BaseEarlyClassifier
Defining methods:
fitting - fit(self, X, y)
predicting - predict(self, X)
- predict_proba(self, X)
updating predictions - update_predict(self, X)
(streaming) - update_predict_proba(self, X)
Inherited inspection methods:
hyper-parameter inspection - get_params()
fitted parameter inspection - get_fitted_params()
State:
fitted model/strategy - by convention, any attributes ending in "_"
fitted state flag - is_fitted (property)
fitted state inspection - check_is_fitted()
streaming decision info - state_info attribute
"""
__all__ = [
"BaseEarlyClassifier",
]
__author__ = ["mloning", "fkiraly", "TonyBagnall", "MatthewMiddlehurst"]
from abc import ABC, abstractmethod
from typing import Tuple
import numpy as np
from sktime.base import BaseEstimator
from sktime.classification import BaseClassifier
class BaseEarlyClassifier(BaseEstimator, ABC):
"""Abstract base class for early time series classifiers.
The base classifier specifies the methods and method signatures that all
early classifiers have to implement. Attributes with an underscore suffix are set in
the method fit.
    Attributes
    ----------
classes_ : ndarray of class labels, possibly strings
n_classes_ : integer, number of classes (length of classes_)
fit_time_ : integer, time (in milliseconds) for fit to run.
_class_dictionary : dictionary mapping classes_ onto integers 0...n_classes_-1.
_threads_to_use : number of threads to use in fit as determined by n_jobs.
state_info : An array containing the state info for each decision in X.
"""
_tags = {
"X_inner_mtype": "numpy3D", # which type do _fit/_predict, support for X?
# it should be either "numpy3D" or "nested_univ" (nested pd.DataFrame)
"capability:multivariate": False,
"capability:unequal_length": False,
"capability:missing_values": False,
"capability:multithreading": False,
}
def __init__(self):
self.classes_ = []
self.n_classes_ = 0
self.fit_time_ = 0
self._class_dictionary = {}
self._threads_to_use = 1
"""
An array containing the state info for each decision in X from update and
        predict methods. Contains classifier-dependent information for future decisions
        on the data and information on when a case's decision has been made. Each row
contains information for a case from the latest decision on its safety made in
update/predict. Successive updates are likely to remove rows from the
state_info, as it will only store as many rows as there are input instances to
update/predict.
"""
self.state_info = None
super(BaseEarlyClassifier, self).__init__()
def fit(self, X, y):
"""Fit time series classifier to training data.
Parameters
----------
X : 3D np.array (any number of dimensions, equal length series)
of shape [n_instances, n_dimensions, series_length]
or 2D np.array (univariate, equal length series)
of shape [n_instances, series_length]
or pd.DataFrame with each column a dimension, each cell a pd.Series
(any number of dimensions, equal or unequal length series)
or of any other supported Panel mtype
for list of mtypes, see datatypes.SCITYPE_REGISTER
for specifications, see examples/AA_datatypes_and_datasets.ipynb
y : 1D np.array of int, of shape [n_instances] - class labels for fitting
indices correspond to instance indices in X
Returns
-------
self : Reference to self.
Notes
-----
Changes state by creating a fitted model that updates attributes
ending in "_" and sets is_fitted flag to True.
"""
fit = BaseClassifier.fit
return fit(self, X, y)
def predict(self, X) -> Tuple[np.ndarray, np.ndarray]:
"""Predicts labels for sequences in X.
Early classifiers can predict at series lengths shorter than the train data
series length.
Predict will return -1 for cases which it cannot make a decision on yet. The
output is only guaranteed to return a valid class label for all cases when
using the full series length.
Parameters
----------
X : 3D np.array (any number of dimensions, equal length series)
of shape [n_instances, n_dimensions, series_length]
or 2D np.array (univariate, equal length series)
of shape [n_instances, series_length]
or pd.DataFrame with each column a dimension, each cell a pd.Series
(any number of dimensions, equal or unequal length series)
or of any other supported Panel mtype
for list of mtypes, see datatypes.SCITYPE_REGISTER
for specifications, see examples/AA_datatypes_and_datasets.ipynb
Returns
-------
y : 1D np.array of int, of shape [n_instances] - predicted class labels
indices correspond to instance indices in X
decisions : 1D bool array
An array of booleans, containing the decision of whether a prediction is
safe to use or not.
            i-th entry is the classifier's decision that the i-th instance is safe to use
"""
self.check_is_fitted()
# boilerplate input checks for predict-like methods
X = self._check_convert_X_for_predict(X)
return self._predict(X)
def update_predict(self, X) -> Tuple[np.ndarray, np.ndarray]:
"""Update label prediction for sequences in X at a larger series length.
        Uses information stored in the classifier's state from previous predictions
        and updates at shorter series lengths. Update will only accept cases which
        have not yet had a decision made; cases which have had a positive decision
        should be removed from the input with the row ordering preserved.
If no state information is present, predict will be called instead.
Prediction updates will return -1 for cases which it cannot make a decision on
yet. The output is only guaranteed to return a valid class label for all cases
when using the full series length.
Parameters
----------
X : 3D np.array (any number of dimensions, equal length series)
of shape [n_instances, n_dimensions, series_length]
or 2D np.array (univariate, equal length series)
of shape [n_instances, series_length]
or pd.DataFrame with each column a dimension, each cell a pd.Series
(any number of dimensions, equal or unequal length series)
or of any other supported Panel mtype
for list of mtypes, see datatypes.SCITYPE_REGISTER
for specifications, see examples/AA_datatypes_and_datasets.ipynb
Returns
-------
y : 1D np.array of int, of shape [n_instances] - predicted class labels
indices correspond to instance indices in X
decisions : 1D bool array
An array of booleans, containing the decision of whether a prediction is
safe to use or not.
            i-th entry is the classifier's decision that the i-th instance is safe to use
"""
self.check_is_fitted()
# boilerplate input checks for predict-like methods
X = self._check_convert_X_for_predict(X)
if self.state_info is None:
return self._predict(X)
else:
return self._update_predict(X)
def predict_proba(self, X) -> Tuple[np.ndarray, np.ndarray]:
"""Predicts labels probabilities for sequences in X.
Early classifiers can predict at series lengths shorter than the train data
series length.
Probability predictions will return [-1]*n_classes_ for cases which it cannot
make a decision on yet. The output is only guaranteed to return a valid class
label for all cases when using the full series length.
Parameters
----------
X : 3D np.array (any number of dimensions, equal length series)
of shape [n_instances, n_dimensions, series_length]
or 2D np.array (univariate, equal length series)
of shape [n_instances, series_length]
or pd.DataFrame with each column a dimension, each cell a pd.Series
(any number of dimensions, equal or unequal length series)
or of any other supported Panel mtype
for list of mtypes, see datatypes.SCITYPE_REGISTER
for specifications, see examples/AA_datatypes_and_datasets.ipynb
Returns
-------
y : 2D array of shape [n_instances, n_classes] - predicted class probabilities
1st dimension indices correspond to instance indices in X
2nd dimension indices correspond to possible labels (integers)
(i, j)-th entry is predictive probability that i-th instance is of class j
decisions : 1D bool array
An array of booleans, containing the decision of whether a prediction is
safe to use or not.
            i-th entry is the classifier's decision that the i-th instance is safe to use
"""
self.check_is_fitted()
# boilerplate input checks for predict-like methods
X = self._check_convert_X_for_predict(X)
return self._predict_proba(X)
def update_predict_proba(self, X) -> Tuple[np.ndarray, np.ndarray]:
"""Update label probabilities for sequences in X at a larger series length.
        Uses information stored in the classifier's state from previous predictions
        and updates at shorter series lengths. Update will only accept cases which
        have not yet had a decision made; cases which have had a positive decision
        should be removed from the input with the row ordering preserved.
If no state information is present, predict_proba will be called instead.
        Probability prediction updates will return [-1]*n_classes_ for cases which it
cannot make a decision on yet. The output is only guaranteed to return a valid
class label for all cases when using the full series length.
Parameters
----------
X : 3D np.array (any number of dimensions, equal length series)
of shape [n_instances, n_dimensions, series_length]
or 2D np.array (univariate, equal length series)
of shape [n_instances, series_length]
or pd.DataFrame with each column a dimension, each cell a pd.Series
(any number of dimensions, equal or unequal length series)
or of any other supported Panel mtype
for list of mtypes, see datatypes.SCITYPE_REGISTER
for specifications, see examples/AA_datatypes_and_datasets.ipynb
Returns
-------
y : 2D array of shape [n_instances, n_classes] - predicted class probabilities
1st dimension indices correspond to instance indices in X
2nd dimension indices correspond to possible labels (integers)
(i, j)-th entry is predictive probability that i-th instance is of class j
decisions : 1D bool array
An array of booleans, containing the decision of whether a prediction is
safe to use or not.
            i-th entry is the classifier's decision that the i-th instance is safe to use
"""
self.check_is_fitted()
# boilerplate input checks for predict-like methods
X = self._check_convert_X_for_predict(X)
if self.state_info is None:
return self._predict_proba(X)
else:
return self._update_predict_proba(X)
def score(self, X, y) -> Tuple[float, float, float]:
"""Scores predicted labels against ground truth labels on X.
Parameters
----------
X : 3D np.array (any number of dimensions, equal length series)
of shape [n_instances, n_dimensions, series_length]
or 2D np.array (univariate, equal length series)
of shape [n_instances, series_length]
or pd.DataFrame with each column a dimension, each cell a pd.Series
(any number of dimensions, equal or unequal length series)
or of any other supported Panel mtype
for list of mtypes, see datatypes.SCITYPE_REGISTER
for specifications, see examples/AA_datatypes_and_datasets.ipynb
y : 1D np.ndarray of int, of shape [n_instances] - class labels (ground truth)
indices correspond to instance indices in X
Returns
-------
        Tuple of floats: harmonic mean, accuracy and earliness scores of predict(X) vs y
"""
self.check_is_fitted()
# boilerplate input checks for predict-like methods
X = self._check_convert_X_for_predict(X)
return self._score(X, y)
def get_state_info(self):
"""Return the state information generated from the last predict/update call.
Returns
-------
An array containing the state info for each decision in X from update and
        predict methods. Contains classifier-dependent information for future decisions
        on the data and information on when a case's decision has been made. Each row
contains information for a case from the latest decision on its safety made in
update/predict. Successive updates are likely to remove rows from the
state_info, as it will only store as many rows as there are input instances to
update/predict.
"""
return self.state_info
def reset_state_info(self):
"""Reset the state information used in update methods."""
self.state_info = None
@staticmethod
def filter_X(X, decisions):
"""Remove True cases from X given a boolean array of decisions."""
inv_dec = np.invert(decisions)
return X[inv_dec]
@staticmethod
def filter_X_y(X, y, decisions):
"""Remove True cases from X and y given a boolean array of decisions."""
inv_dec = np.invert(decisions)
return X[inv_dec], y[inv_dec]
@staticmethod
def split_indices(indices, decisions):
"""Split a list of indices given a boolean array of decisions."""
inv_dec = np.invert(decisions)
return indices[inv_dec], indices[decisions]
@staticmethod
def split_indices_and_filter(X, indices, decisions):
"""Remove True cases and split a list of indices given an array of decisions."""
inv_dec = np.invert(decisions)
return X[inv_dec], indices[inv_dec], indices[decisions]
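    # Worked example for the helpers above (illustrative; values assumed):
    # with decisions = np.array([True, False, True]) and indices = np.arange(3),
    #   filter_X(X, decisions)            -> X[[1]]  (undecided cases only)
    #   split_indices(indices, decisions) -> (array([1]), array([0, 2]))
    # i.e. True (decided) cases are dropped from X and routed to the second
    # element of the index split.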
@abstractmethod
def _fit(self, X, y):
"""Fit time series classifier to training data.
Abstract method, must be implemented.
Parameters
----------
X : guaranteed to be of a type in self.get_tag("X_inner_mtype")
if self.get_tag("X_inner_mtype") = "numpy3D":
3D np.ndarray of shape = [n_instances, n_dimensions, series_length]
if self.get_tag("X_inner_mtype") = "nested_univ":
pd.DataFrame with each column a dimension, each cell a pd.Series
for list of other mtypes, see datatypes.SCITYPE_REGISTER
for specifications, see examples/AA_datatypes_and_datasets.ipynb
y : 1D np.array of int, of shape [n_instances] - class labels for fitting
indices correspond to instance indices in X
Returns
-------
self :
Reference to self.
Notes
-----
Changes state by creating a fitted model that updates attributes
ending in "_" and sets is_fitted flag to True.
"""
...
@abstractmethod
def _predict(self, X) -> Tuple[np.ndarray, np.ndarray]:
"""Predicts labels for sequences in X.
Abstract method, must be implemented.
This method should update state_info with any values necessary to make future
decisions. It is recommended that the previous time stamp used for each case
should be stored in the state_info. The number of rows in state_info after the
method has been called should match the number of input rows.
Parameters
----------
X : guaranteed to be of a type in self.get_tag("X_inner_mtype")
if self.get_tag("X_inner_mtype") = "numpy3D":
3D np.ndarray of shape = [n_instances, n_dimensions, series_length]
if self.get_tag("X_inner_mtype") = "nested_univ":
pd.DataFrame with each column a dimension, each cell a pd.Series
for list of other mtypes, see datatypes.SCITYPE_REGISTER
for specifications, see examples/AA_datatypes_and_datasets.ipynb
Returns
-------
y : 1D np.array of int, of shape [n_instances] - predicted class labels
indices correspond to instance indices in X
decisions : 1D bool array
An array of booleans, containing the decision of whether a prediction is
safe to use or not.
            i-th entry is the classifier's decision that the i-th instance is safe to use
"""
...
@abstractmethod
def _update_predict(self, X) -> Tuple[np.ndarray, np.ndarray]:
"""Update label prediction for sequences in X at a larger series length.
Abstract method, must be implemented.
Uses information from previous decisions stored in state_info. This method
should update state_info with any values necessary to make future decisions.
It is recommended that the previous time stamp used for each case should be
stored in the state_info. The number of rows in state_info after the method has
been called should match the number of input rows.
Parameters
----------
X : guaranteed to be of a type in self.get_tag("X_inner_mtype")
if self.get_tag("X_inner_mtype") = "numpy3D":
3D np.ndarray of shape = [n_instances, n_dimensions, series_length]
if self.get_tag("X_inner_mtype") = "nested_univ":
pd.DataFrame with each column a dimension, each cell a pd.Series
for list of other mtypes, see datatypes.SCITYPE_REGISTER
for specifications, see examples/AA_datatypes_and_datasets.ipynb
Returns
-------
y : 1D np.array of int, of shape [n_instances] - predicted class labels
indices correspond to instance indices in X
decisions : 1D bool array
An array of booleans, containing the decision of whether a prediction is
safe to use or not.
            i-th entry is the classifier's decision that the i-th instance is safe to use
"""
...
def _predict_proba(self, X) -> Tuple[np.ndarray, np.ndarray]:
"""Predicts labels probabilities for sequences in X.
This method should update state_info with any values necessary to make future
decisions. It is recommended that the previous time stamp used for each case
should be stored in the state_info. The number of rows in state_info after the
method has been called should match the number of input rows.
        Default behaviour is to call _predict and, if a positive decision is made,
        set the predicted class probability to 1 and all other class probabilities
        to 0. Override if better estimates are obtainable.
Parameters
----------
X : guaranteed to be of a type in self.get_tag("X_inner_mtype")
if self.get_tag("X_inner_mtype") = "numpy3D":
3D np.ndarray of shape = [n_instances, n_dimensions, series_length]
if self.get_tag("X_inner_mtype") = "nested_univ":
pd.DataFrame with each column a dimension, each cell a pd.Series
for list of other mtypes, see datatypes.SCITYPE_REGISTER
for specifications, see examples/AA_datatypes_and_datasets.ipynb
Returns
-------
y : 2D array of shape [n_instances, n_classes] - predicted class probabilities
1st dimension indices correspond to instance indices in X
2nd dimension indices correspond to possible labels (integers)
(i, j)-th entry is predictive probability that i-th instance is of class j
decisions : 1D bool array
An array of booleans, containing the decision of whether a prediction is
safe to use or not.
            i-th entry is the classifier's decision that the i-th instance is safe to use
"""
dists = np.zeros((X.shape[0], self.n_classes_))
preds, decisions = self._predict(X)
for i in range(0, X.shape[0]):
if decisions[i]:
dists[i, self._class_dictionary[preds[i]]] = 1
else:
dists[i, :] = -1
return dists, decisions
def _update_predict_proba(self, X) -> Tuple[np.ndarray, np.ndarray]:
"""Update label probabilities for sequences in X at a larger series length.
Uses information from previous decisions stored in state_info. This method
should update state_info with any values necessary to make future decisions.
It is recommended that the previous time stamp used for each case should be
stored in the state_info. The number of rows in state_info after the method has
been called should match the number of input rows.
        Default behaviour is to call _update_predict and, if a positive decision is
        made, set the predicted class probability to 1 and all other class
        probabilities to 0. Override if better estimates are obtainable.
Parameters
----------
X : guaranteed to be of a type in self.get_tag("X_inner_mtype")
if self.get_tag("X_inner_mtype") = "numpy3D":
3D np.ndarray of shape = [n_instances, n_dimensions, series_length]
if self.get_tag("X_inner_mtype") = "nested_univ":
pd.DataFrame with each column a dimension, each cell a pd.Series
for list of other mtypes, see datatypes.SCITYPE_REGISTER
for specifications, see examples/AA_datatypes_and_datasets.ipynb
Returns
-------
y : 2D array of shape [n_instances, n_classes] - predicted class probabilities
1st dimension indices correspond to instance indices in X
2nd dimension indices correspond to possible labels (integers)
(i, j)-th entry is predictive probability that i-th instance is of class j
decisions : 1D bool array
An array of booleans, containing the decision of whether a prediction is
safe to use or not.
            i-th entry is the classifier's decision that the i-th instance is safe to use
"""
dists = np.zeros((X.shape[0], self.n_classes_))
preds, decisions = self._update_predict(X)
for i in range(0, X.shape[0]):
if decisions[i]:
dists[i, self._class_dictionary[preds[i]]] = 1
else:
dists[i, :] = -1
return dists, decisions
@abstractmethod
def _score(self, X, y) -> Tuple[float, float, float]:
"""Scores predicted labels against ground truth labels on X.
Abstract method, must be implemented.
Parameters
----------
X : guaranteed to be of a type in self.get_tag("X_inner_mtype")
if self.get_tag("X_inner_mtype") = "numpy3D":
3D np.ndarray of shape = [n_instances, n_dimensions, series_length]
if self.get_tag("X_inner_mtype") = "nested_univ":
pd.DataFrame with each column a dimension, each cell a pd.Series
for list of other mtypes, see datatypes.SCITYPE_REGISTER
for specifications, see examples/AA_datatypes_and_datasets.ipynb
        y : 1D np.array of int, of shape [n_instances] - class labels (ground truth)
indices correspond to instance indices in X
Returns
-------
        Tuple of floats: harmonic mean, accuracy and earliness scores of predict(X) vs y
"""
...
def _check_convert_X_for_predict(self, X):
"""Input checks, capability checks, repeated in all predict/score methods.
Parameters
----------
X : any object (to check/convert)
should be of a supported Panel mtype or 2D numpy.ndarray
Returns
-------
X: an object of a supported Panel mtype, numpy3D if X was a 2D numpy.ndarray
Raises
------
ValueError if X is of invalid input data type, or there is not enough data
ValueError if the capabilities in self._tags do not handle the data.
"""
_check_convert_X_for_predict = BaseClassifier._check_convert_X_for_predict
return _check_convert_X_for_predict(self, X)
def _check_capabilities(self, missing, multivariate, unequal):
"""Check whether this classifier can handle the data characteristics.
Parameters
----------
missing : boolean, does the data passed to fit contain missing values?
        multivariate : boolean, is the data passed to fit multivariate?
        unequal : boolean, do the time series passed to fit have variable lengths?
Raises
------
ValueError if the capabilities in self._tags do not handle the data.
"""
_check_capabilities = BaseClassifier._check_capabilities
return _check_capabilities(self, missing, multivariate, unequal)
def _convert_X(self, X):
"""Convert equal length series from DataFrame to numpy array or vice versa.
Parameters
----------
self : this classifier
X : pd.DataFrame or np.ndarray. Input attribute data
Returns
-------
X : input X converted to type in "X_inner_mtype" tag
usually a pd.DataFrame (nested) or 3D np.ndarray
Checked and possibly converted input data
"""
_convert_X = BaseClassifier._convert_X
return _convert_X(self, X)
| 43.561889 | 88 | 0.645418 | 3,540 | 26,747 | 4.763559 | 0.093503 | 0.012453 | 0.014232 | 0.030244 | 0.830042 | 0.817707 | 0.800984 | 0.797664 | 0.790251 | 0.790251 | 0 | 0.00438 | 0.291509 | 26,747 | 613 | 89 | 43.632953 | 0.885488 | 0.701798 | 0 | 0.442478 | 0 | 0 | 0.039003 | 0.021235 | 0 | 0 | 0 | 0 | 0 | 1 | 0.19469 | false | 0 | 0.044248 | 0 | 0.415929 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c0367ac2523b07f09b914e1d4c3918042f78f026 | 193 | py | Python | bankruptcy/__init__.py | freelawproject/document-parser | 3ad36951f7e2183e6c32a271530025a606ea7e16 | [
"BSD-2-Clause"
] | 3 | 2021-03-02T04:45:01.000Z | 2021-04-28T14:28:51.000Z | bankruptcy/__init__.py | freelawproject/document-parser | 3ad36951f7e2183e6c32a271530025a606ea7e16 | [
"BSD-2-Clause"
] | 36 | 2021-03-22T13:30:55.000Z | 2022-03-22T18:13:06.000Z | bankruptcy/__init__.py | freelawproject/document-parser | 3ad36951f7e2183e6c32a271530025a606ea7e16 | [
"BSD-2-Clause"
] | null | null | null | # __init__.py
from .parser import (
extract_all,
extract_official_form_106_a_b,
extract_official_form_106_d,
extract_official_form_106_e_f,
extract_official_form_106_sum,
)
| 21.444444 | 34 | 0.782383 | 29 | 193 | 4.413793 | 0.551724 | 0.46875 | 0.59375 | 0.6875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.074534 | 0.165803 | 193 | 8 | 35 | 24.125 | 0.720497 | 0.056995 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.142857 | 0 | 0.142857 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
c0390e57bd269973cd03013332db0e2eb5f0c3e1 | 96 | py | Python | venv/lib/python3.8/site-packages/pip/_internal/cli/__init__.py | GiulianaPola/select_repeats | 17a0d053d4f874e42cf654dd142168c2ec8fbd11 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/pip/_internal/cli/__init__.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/pip/_internal/cli/__init__.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/16/41/c1/829c716fefe077aaf51639cd85f30ecc0518c97a17289e9a6e28df7055 | 96 | 96 | 0.895833 | 9 | 96 | 9.555556 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.4375 | 0 | 96 | 1 | 96 | 96 | 0.458333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
222ceb2e80b28fc60d294ef27e10e3c0c676e213 | 143 | py | Python | sh_logistics/logistics/doctype/job/test_job.py | sahalMoidu/sh_logistics | 679e510a295bc44f85a5eeb781bb98eeacaf0acf | [
"MIT"
] | null | null | null | sh_logistics/logistics/doctype/job/test_job.py | sahalMoidu/sh_logistics | 679e510a295bc44f85a5eeb781bb98eeacaf0acf | [
"MIT"
] | null | null | null | sh_logistics/logistics/doctype/job/test_job.py | sahalMoidu/sh_logistics | 679e510a295bc44f85a5eeb781bb98eeacaf0acf | [
"MIT"
] | null | null | null | # Copyright (c) 2022, softwarehut and Contributors
# See license.txt
# import frappe
import unittest
class TestJob(unittest.TestCase):
pass
| 15.888889 | 50 | 0.776224 | 18 | 143 | 6.166667 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032787 | 0.146853 | 143 | 8 | 51 | 17.875 | 0.877049 | 0.545455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 6 |
224109a94105b711a2b176990c347c95b6e63bb8 | 178 | py | Python | exam/wizard/__init__.py | kyaranusa/School-Management-Systems | d6cd71037fe46c08feff32f42af61f56eb25a7c7 | [
"MIT"
] | null | null | null | exam/wizard/__init__.py | kyaranusa/School-Management-Systems | d6cd71037fe46c08feff32f42af61f56eb25a7c7 | [
"MIT"
] | null | null | null | exam/wizard/__init__.py | kyaranusa/School-Management-Systems | d6cd71037fe46c08feff32f42af61f56eb25a7c7 | [
"MIT"
] | 1 | 2020-11-17T03:25:10.000Z | 2020-11-17T03:25:10.000Z | # See LICENSE file for full copyright and licensing details.
from . import subject_result
from . import move_standards
from . import batch_result
from . import terminate_reason
| 25.428571 | 60 | 0.814607 | 25 | 178 | 5.64 | 0.72 | 0.283688 | 0.22695 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.151685 | 178 | 6 | 61 | 29.666667 | 0.933775 | 0.325843 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
22553677afe47f8bf2b31d9c5ffb41a89732229e | 58,119 | py | Python | pysal/model/spreg/error_sp_hom.py | ocefpaf/pysal | 7e397bdb4c22d4e2442b4ee88bcd691d2421651d | [
"BSD-3-Clause"
] | 1 | 2021-08-16T02:47:35.000Z | 2021-08-16T02:47:35.000Z | pysal/model/spreg/error_sp_hom.py | ocefpaf/pysal | 7e397bdb4c22d4e2442b4ee88bcd691d2421651d | [
"BSD-3-Clause"
] | null | null | null | pysal/model/spreg/error_sp_hom.py | ocefpaf/pysal | 7e397bdb4c22d4e2442b4ee88bcd691d2421651d | [
"BSD-3-Clause"
] | null | null | null | '''
Hom family of models based on: :cite:`Drukker2013`
Following: :cite:`Anselin2011`
'''
__author__ = "Luc Anselin luc.anselin@asu.edu, Daniel Arribas-Bel darribas@asu.edu"
from scipy import sparse as SP
import numpy as np
from numpy import linalg as la
from . import ols as OLS
from pysal.lib.weights.spatial_lag import lag_spatial
from .utils import power_expansion, set_endog, iter_msg, sp_att
from .utils import get_A1_hom, get_A2_hom, get_A1_het, optim_moments
from .utils import get_spFilter, get_lags, _moments2eqs
from .utils import spdot, RegressionPropsY, set_warn
from . import twosls as TSLS
from . import user_output as USER
from . import summary_output as SUMMARY
__all__ = ["GM_Error_Hom", "GM_Endog_Error_Hom", "GM_Combo_Hom"]
class BaseGM_Error_Hom(RegressionPropsY):
'''
GMM method for a spatial error model with homoskedasticity (note: no
consistency checks, diagnostics or constant added); based on
Drukker et al. (2013) :cite:`Drukker2013`, following Anselin (2011) :cite:`Anselin2011`.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
w : Sparse matrix
Spatial weights sparse matrix
max_iter : int
Maximum number of iterations of steps 2a and 2b from Arraiz
et al. Note: epsilon provides an additional stop condition.
epsilon : float
Minimum change in lambda required to stop iterations of
steps 2a and 2b from Arraiz et al. Note: max_iter provides
an additional stop condition.
A1 : string
If A1='het', then the matrix A1 is defined as in Arraiz et
al. If A1='hom', then as in Anselin (2011) (default). If
A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
and Drukker, Prucha and Raciborski (2010).
Attributes
----------
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
iter_stop : string
Stop criterion reached during iteration of steps 2a and 2b
from Arraiz et al.
iteration : integer
Number of iterations of steps 2a and 2b from Arraiz et al.
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
sig2 : float
Sigma squared used in computations
xtx : float
X'X
Examples
--------
>>> import numpy as np
>>> import pysal.lib
>>> db = pysal.lib.io.open(pysal.lib.examples.get_path('columbus.dbf'),'r')
>>> y = np.array(db.by_col("HOVAL"))
>>> y = np.reshape(y, (49,1))
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("CRIME"))
>>> X = np.array(X).T
>>> X = np.hstack((np.ones(y.shape),X))
>>> w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
>>> w.transform = 'r'
Model commands
>>> reg = BaseGM_Error_Hom(y, X, w=w.sparse, A1='hom_sc')
    >>> print(np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4))
[[ 47.9479 12.3021]
[ 0.7063 0.4967]
[ -0.556 0.179 ]
[ 0.4129 0.1835]]
    >>> print(np.around(reg.vm, 4)) #doctest: +SKIP
[[ 1.51340700e+02 -5.29060000e+00 -1.85650000e+00 -2.40000000e-03]
[ -5.29060000e+00 2.46700000e-01 5.14000000e-02 3.00000000e-04]
[ -1.85650000e+00 5.14000000e-02 3.21000000e-02 -1.00000000e-04]
[ -2.40000000e-03 3.00000000e-04 -1.00000000e-04 3.37000000e-02]]
'''
def __init__(self, y, x, w,
max_iter=1, epsilon=0.00001, A1='hom_sc'):
if A1 == 'hom':
wA1 = get_A1_hom(w)
elif A1 == 'hom_sc':
wA1 = get_A1_hom(w, scalarKP=True)
elif A1 == 'het':
wA1 = get_A1_het(w)
wA2 = get_A2_hom(w)
# 1a. OLS --> \tilde{\delta}
ols = OLS.BaseOLS(y=y, x=x)
self.x, self.y, self.n, self.k, self.xtx = ols.x, ols.y, ols.n, ols.k, ols.xtx
# 1b. GM --> \tilde{\rho}
moments = moments_hom(w, wA1, wA2, ols.u)
lambda1 = optim_moments(moments)
lambda_old = lambda1
self.iteration, eps = 0, 1
while self.iteration < max_iter and eps > epsilon:
# 2a. SWLS --> \hat{\delta}
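            # get_spFilter applies the spatial Cochrane-Orcutt transform, i.e.
            # x_s = (I - lambda*W) x and y_s = (I - lambda*W) y, so the least
            # squares step below runs on spatially filtered variables.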
x_s = get_spFilter(w, lambda_old, self.x)
y_s = get_spFilter(w, lambda_old, self.y)
ols_s = OLS.BaseOLS(y=y_s, x=x_s)
self.predy = spdot(self.x, ols_s.betas)
self.u = self.y - self.predy
# 2b. GM 2nd iteration --> \hat{\rho}
moments = moments_hom(w, wA1, wA2, self.u)
psi = get_vc_hom(w, wA1, wA2, self, lambda_old)[0]
lambda2 = optim_moments(moments, psi)
eps = abs(lambda2 - lambda_old)
lambda_old = lambda2
self.iteration += 1
self.iter_stop = iter_msg(self.iteration, max_iter)
# Output
self.betas = np.vstack((ols_s.betas, lambda2))
self.vm, self.sig2 = get_omega_hom_ols(
w, wA1, wA2, self, lambda2, moments[0])
self.e_filtered = self.u - lambda2 * w * self.u
self._cache = {}
class GM_Error_Hom(BaseGM_Error_Hom):
'''
GMM method for a spatial error model with homoskedasticity, with results
and diagnostics; based on Drukker et al. (2013) :cite:`Drukker2013`, following Anselin
(2011) :cite:`Anselin2011`.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
w : pysal W object
Spatial weights object
max_iter : int
Maximum number of iterations of steps 2a and 2b from Arraiz
et al. Note: epsilon provides an additional stop condition.
epsilon : float
Minimum change in lambda required to stop iterations of
steps 2a and 2b from Arraiz et al. Note: max_iter provides
an additional stop condition.
A1 : string
If A1='het', then the matrix A1 is defined as in Arraiz et
al. If A1='hom', then as in Anselin (2011). If
A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
and Drukker, Prucha and Raciborski (2010).
vm : boolean
If True, include variance-covariance matrix in summary
results
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
iter_stop : string
Stop criterion reached during iteration of steps 2a and 2b
from Arraiz et al.
iteration : integer
Number of iterations of steps 2a and 2b from Arraiz et al.
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
vm : array
Variance covariance matrix (kxk)
sig2 : float
Sigma squared used in computations
std_err : array
1xk array of standard errors of the betas
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
xtx : float
X'X
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal.lib
Open data on Columbus neighborhood crime (49 areas) using pysal.lib.io.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.lib.io.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.lib.io.open(pysal.lib.examples.get_path('columbus.dbf'),'r')
Extract the HOVAL column (home values) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
    a numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y = np.array(db.by_col("HOVAL"))
>>> y = np.reshape(y, (49,1))
Extract INC (income) and CRIME (crime) vectors from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this class adds a vector of ones to the
independent variables passed in.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X.append(db.by_col("CRIME"))
>>> X = np.array(X).T
Since we want to run a spatial error model, we need to specify the spatial
weights matrix that includes the spatial configuration of the observations
into the error component of the model. To do that, we can open an already
existing gal file or create a new one. In this case, we will create one
from ``columbus.shp``.
>>> w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
    things, this allows us to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
    We are all set with the preliminaries; we are good to run the model. In this
case, we will need the variables and the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> reg = GM_Error_Hom(y, X, w=w, A1='hom_sc', name_y='home value', name_x=['income', 'crime'], name_ds='columbus')
    Once we have run the model, we can explore the output a little. The
regression object we have created has many attributes so take your time to
discover them. This class offers an error model that assumes
homoskedasticity but that unlike the models from
``spreg.error_sp``, it allows for inference on the spatial
parameter. This is why you obtain as many coefficient estimates as
    standard errors, which you calculate by taking the square root of the
diagonal of the variance-covariance matrix of the parameters:
    >>> print(np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4))
[[ 47.9479 12.3021]
[ 0.7063 0.4967]
[ -0.556 0.179 ]
[ 0.4129 0.1835]]
'''
def __init__(self, y, x, w,
max_iter=1, epsilon=0.00001, A1='hom_sc',
vm=False, name_y=None, name_x=None,
name_w=None, name_ds=None):
n = USER.check_arrays(y, x)
USER.check_y(y, n)
USER.check_weights(w, y, w_required=True)
x_constant = USER.check_constant(x)
BaseGM_Error_Hom.__init__(self, y=y, x=x_constant, w=w.sparse, A1=A1,
max_iter=max_iter, epsilon=epsilon)
self.title = "SPATIALLY WEIGHTED LEAST SQUARES (HOM)"
self.name_ds = USER.set_name_ds(name_ds)
self.name_y = USER.set_name_y(name_y)
self.name_x = USER.set_name_x(name_x, x)
self.name_x.append('lambda')
self.name_w = USER.set_name_w(name_w, w)
SUMMARY.GM_Error_Hom(reg=self, w=w, vm=vm)
class BaseGM_Endog_Error_Hom(RegressionPropsY):
'''
GMM method for a spatial error model with homoskedasticity and
endogenous variables (note: no consistency checks, diagnostics or constant
added); based on Drukker et al. (2013) :cite:`Drukker2013`, following Anselin (2011)
:cite:`Anselin2011`.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
w : Sparse matrix
Spatial weights sparse matrix
max_iter : int
Maximum number of iterations of steps 2a and 2b from Arraiz
et al. Note: epsilon provides an additional stop condition.
epsilon : float
Minimum change in lambda required to stop iterations of
steps 2a and 2b from Arraiz et al. Note: max_iter provides
an additional stop condition.
A1 : string
If A1='het', then the matrix A1 is defined as in Arraiz et
al. If A1='hom', then as in Anselin (2011). If
A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
and Drukker, Prucha and Raciborski (2010).
Attributes
----------
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable used as instruments
z : array
nxk array of variables (combination of x and yend)
h : array
nxl array of instruments (combination of x and q)
iter_stop : string
Stop criterion reached during iteration of steps 2a and 2b
from Arraiz et al.
iteration : integer
Number of iterations of steps 2a and 2b from Arraiz et al.
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
sig2 : float
Sigma squared used in computations
hth : float
H'H
Examples
--------
>>> import numpy as np
>>> import pysal.lib
>>> db = pysal.lib.io.open(pysal.lib.examples.get_path('columbus.dbf'),'r')
>>> y = np.array(db.by_col("HOVAL"))
>>> y = np.reshape(y, (49,1))
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
>>> X = np.hstack((np.ones(y.shape),X))
>>> yd = []
>>> yd.append(db.by_col("CRIME"))
>>> yd = np.array(yd).T
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
>>> w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
>>> w.transform = 'r'
>>> reg = BaseGM_Endog_Error_Hom(y, X, yd, q, w=w.sparse, A1='hom_sc')
    >>> print(np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4))
[[ 55.3658 23.496 ]
[ 0.4643 0.7382]
[ -0.669 0.3943]
[ 0.4321 0.1927]]
'''
def __init__(self, y, x, yend, q, w,
max_iter=1, epsilon=0.00001, A1='hom_sc'):
if A1 == 'hom':
wA1 = get_A1_hom(w)
elif A1 == 'hom_sc':
wA1 = get_A1_hom(w, scalarKP=True)
elif A1 == 'het':
wA1 = get_A1_het(w)
wA2 = get_A2_hom(w)
# 1a. S2SLS --> \tilde{\delta}
tsls = TSLS.BaseTSLS(y=y, x=x, yend=yend, q=q)
self.x, self.z, self.h, self.y, self.hth = tsls.x, tsls.z, tsls.h, tsls.y, tsls.hth
self.yend, self.q, self.n, self.k = tsls.yend, tsls.q, tsls.n, tsls.k
# 1b. GM --> \tilde{\rho}
moments = moments_hom(w, wA1, wA2, tsls.u)
lambda1 = optim_moments(moments)
lambda_old = lambda1
self.iteration, eps = 0, 1
while self.iteration < max_iter and eps > epsilon:
# 2a. GS2SLS --> \hat{\delta}
x_s = get_spFilter(w, lambda_old, self.x)
y_s = get_spFilter(w, lambda_old, self.y)
yend_s = get_spFilter(w, lambda_old, self.yend)
tsls_s = TSLS.BaseTSLS(y=y_s, x=x_s, yend=yend_s, h=self.h)
self.predy = spdot(self.z, tsls_s.betas)
self.u = self.y - self.predy
# 2b. GM 2nd iteration --> \hat{\rho}
moments = moments_hom(w, wA1, wA2, self.u)
psi = get_vc_hom(w, wA1, wA2, self, lambda_old, tsls_s.z)[0]
lambda2 = optim_moments(moments, psi)
eps = abs(lambda2 - lambda_old)
lambda_old = lambda2
self.iteration += 1
self.iter_stop = iter_msg(self.iteration, max_iter)
# Output
self.betas = np.vstack((tsls_s.betas, lambda2))
self.vm, self.sig2 = get_omega_hom(
w, wA1, wA2, self, lambda2, moments[0])
self.e_filtered = self.u - lambda2 * w * self.u
self._cache = {}
class GM_Endog_Error_Hom(BaseGM_Endog_Error_Hom):
'''
GMM method for a spatial error model with homoskedasticity and endogenous
variables, with results and diagnostics; based on Drukker et al. (2013)
:cite:`Drukker2013`, following Anselin (2011) :cite:`Anselin2011`.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
w : pysal W object
Spatial weights object
max_iter : int
Maximum number of iterations of steps 2a and 2b from Arraiz
et al. Note: epsilon provides an additional stop condition.
epsilon : float
Minimum change in lambda required to stop iterations of
steps 2a and 2b from Arraiz et al. Note: max_iter provides
an additional stop condition.
A1 : string
If A1='het', then the matrix A1 is defined as in Arraiz et
al. If A1='hom', then as in Anselin (2011). If
A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
and Drukker, Prucha and Raciborski (2010).
vm : boolean
If True, include variance-covariance matrix in summary
results
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable used as instruments
z : array
nxk array of variables (combination of x and yend)
h : array
nxl array of instruments (combination of x and q)
iter_stop : string
Stop criterion reached during iteration of steps 2a and 2b
from Arraiz et al.
iteration : integer
Number of iterations of steps 2a and 2b from Arraiz et al.
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
sig2 : float
Sigma squared used in computations
std_err : array
1xk array of standard errors of the betas
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_z : list of strings
Names of exogenous and endogenous variables for use in
output
name_q : list of strings
Names of external instruments
name_h : list of strings
                   Names of all instruments used in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
hth : float
H'H
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal.lib
Open data on Columbus neighborhood crime (49 areas) using pysal.lib.io.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.lib.io.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.lib.io.open(pysal.lib.examples.get_path('columbus.dbf'),'r')
Extract the HOVAL column (home values) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
    a numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y = np.array(db.by_col("HOVAL"))
>>> y = np.reshape(y, (49,1))
Extract INC (income) vector from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this class adds a vector of ones to the
independent variables passed in.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
In this case we consider CRIME (crime rates) is an endogenous regressor.
We tell the model that this is so by passing it in a different parameter
from the exogenous variables (x).
>>> yd = []
>>> yd.append(db.by_col("CRIME"))
>>> yd = np.array(yd).T
Because we have endogenous variables, to obtain a correct estimate of the
model, we need to instrument for CRIME. We use DISCBD (distance to the
CBD) for this and hence put it in the instruments parameter, 'q'.
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
Since we want to run a spatial error model, we need to specify the spatial
weights matrix that includes the spatial configuration of the observations
into the error component of the model. To do that, we can open an already
existing gal file or create a new one. In this case, we will create one
from ``columbus.shp``.
>>> w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
    things, this allows us to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
    We are all set with the preliminaries; we are good to run the model. In this
case, we will need the variables (exogenous and endogenous), the
instruments and the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> reg = GM_Endog_Error_Hom(y, X, yd, q, w=w, A1='hom_sc', name_x=['inc'], name_y='hoval', name_yend=['crime'], name_q=['discbd'], name_ds='columbus')
    Once we have run the model, we can explore the output a little. The
regression object we have created has many attributes so take your time to
discover them. This class offers an error model that assumes
homoskedasticity but that unlike the models from
``spreg.error_sp``, it allows for inference on the spatial
    parameter. Hence, we find the same number of betas as standard errors,
    which we calculate by taking the square root of the diagonal of the
variance-covariance matrix:
    >>> print(reg.name_z)
['CONSTANT', 'inc', 'crime', 'lambda']
    >>> print(np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4))
[[ 55.3658 23.496 ]
[ 0.4643 0.7382]
[ -0.669 0.3943]
[ 0.4321 0.1927]]
'''
def __init__(self, y, x, yend, q, w,
max_iter=1, epsilon=0.00001, A1='hom_sc',
vm=False, name_y=None, name_x=None,
name_yend=None, name_q=None,
name_w=None, name_ds=None):
n = USER.check_arrays(y, x, yend, q)
USER.check_y(y, n)
USER.check_weights(w, y, w_required=True)
x_constant = USER.check_constant(x)
BaseGM_Endog_Error_Hom.__init__(
self, y=y, x=x_constant, w=w.sparse, yend=yend, q=q,
A1=A1, max_iter=max_iter, epsilon=epsilon)
self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES (HOM)"
self.name_ds = USER.set_name_ds(name_ds)
self.name_y = USER.set_name_y(name_y)
self.name_x = USER.set_name_x(name_x, x)
self.name_yend = USER.set_name_yend(name_yend, yend)
self.name_z = self.name_x + self.name_yend
self.name_z.append('lambda') # listing lambda last
self.name_q = USER.set_name_q(name_q, q)
self.name_h = USER.set_name_h(self.name_x, self.name_q)
self.name_w = USER.set_name_w(name_w, w)
SUMMARY.GM_Endog_Error_Hom(reg=self, w=w, vm=vm)
class BaseGM_Combo_Hom(BaseGM_Endog_Error_Hom):
'''
GMM method for a spatial lag and error model with homoskedasticity and
endogenous variables (note: no consistency checks, diagnostics or constant
added); based on Drukker et al. (2013) :cite:`Drukker2013`, following Anselin (2011)
:cite:`Anselin2011`.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
w : Sparse matrix
Spatial weights sparse matrix
w_lags : integer
Orders of W to include as instruments for the spatially
                   lagged dependent variable. For example, if w_lags=1, then
instruments are WX; if w_lags=2, then WX, WWX; and so on.
lag_q : boolean
If True, then include spatial lags of the additional
instruments (q).
max_iter : int
Maximum number of iterations of steps 2a and 2b from Arraiz
et al. Note: epsilon provides an additional stop condition.
epsilon : float
Minimum change in lambda required to stop iterations of
steps 2a and 2b from Arraiz et al. Note: max_iter provides
an additional stop condition.
A1 : string
If A1='het', then the matrix A1 is defined as in Arraiz et
al. If A1='hom', then as in Anselin (2011). If
A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
and Drukker, Prucha and Raciborski (2010).
Attributes
----------
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable used as instruments
z : array
nxk array of variables (combination of x and yend)
h : array
nxl array of instruments (combination of x and q)
iter_stop : string
Stop criterion reached during iteration of steps 2a and 2b
from Arraiz et al.
iteration : integer
Number of iterations of steps 2a and 2b from Arraiz et al.
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
sig2 : float
Sigma squared used in computations
hth : float
H'H
Examples
--------
>>> import numpy as np
>>> import pysal.lib
>>> db = pysal.lib.io.open(pysal.lib.examples.get_path('columbus.dbf'),'r')
>>> y = np.array(db.by_col("HOVAL"))
>>> y = np.reshape(y, (49,1))
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
>>> w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
>>> w.transform = 'r'
>>> w_lags = 1
>>> yd2, q2 = pysal.model.spreg.utils.set_endog(y, X, w, None, None, w_lags, True)
>>> X = np.hstack((np.ones(y.shape),X))
Example only with spatial lag
>>> reg = BaseGM_Combo_Hom(y, X, yend=yd2, q=q2, w=w.sparse, A1='hom_sc')
    >>> print(np.around(np.hstack((reg.betas,np.sqrt(reg.vm.diagonal()).reshape(4,1))),4))
[[ 10.1254 15.2871]
[ 1.5683 0.4407]
[ 0.1513 0.4048]
[ 0.2103 0.4226]]
Example with both spatial lag and other endogenous variables
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
>>> yd = []
>>> yd.append(db.by_col("CRIME"))
>>> yd = np.array(yd).T
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
>>> yd2, q2 = pysal.model.spreg.utils.set_endog(y, X, w, yd, q, w_lags, True)
>>> X = np.hstack((np.ones(y.shape),X))
>>> reg = BaseGM_Combo_Hom(y, X, yd2, q2, w=w.sparse, A1='hom_sc')
>>> betas = np.array([['CONSTANT'],['inc'],['crime'],['W_hoval'],['lambda']])
    >>> print(np.hstack((betas, np.around(np.hstack((reg.betas, np.sqrt(reg.vm.diagonal()).reshape(5,1))),5))))
[['CONSTANT' '111.7705' '67.75191']
['inc' '-0.30974' '1.16656']
['crime' '-1.36043' '0.6841']
['W_hoval' '-0.52908' '0.84428']
['lambda' '0.60116' '0.18605']]
'''
def __init__(self, y, x, yend=None, q=None,
w=None, w_lags=1, lag_q=True,
max_iter=1, epsilon=0.00001, A1='hom_sc'):
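        # Note: this base class does not build the spatial lag itself; yend and
        # q are expected to already include the spatially lagged dependent
        # variable and its instruments (e.g. via utils.set_endog, as in the
        # doctest above).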
BaseGM_Endog_Error_Hom.__init__(
self, y=y, x=x, w=w, yend=yend, q=q, A1=A1,
max_iter=max_iter, epsilon=epsilon)
class GM_Combo_Hom(BaseGM_Combo_Hom):
'''
GMM method for a spatial lag and error model with homoskedasticity and
endogenous variables, with results and diagnostics; based on Drukker et
al. (2013) :cite:`Drukker2013`, following Anselin (2011) :cite:`Anselin2011`.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
w : pysal W object
Spatial weights object (always necessary)
w_lags : integer
Orders of W to include as instruments for the spatially
                   lagged dependent variable. For example, if w_lags=1, then
instruments are WX; if w_lags=2, then WX, WWX; and so on.
lag_q : boolean
If True, then include spatial lags of the additional
instruments (q).
max_iter : int
Maximum number of iterations of steps 2a and 2b from Arraiz
et al. Note: epsilon provides an additional stop condition.
epsilon : float
Minimum change in lambda required to stop iterations of
steps 2a and 2b from Arraiz et al. Note: max_iter provides
an additional stop condition.
A1 : string
If A1='het', then the matrix A1 is defined as in Arraiz et
al. If A1='hom', then as in Anselin (2011). If
A1='hom_sc' (default), then as in Drukker, Egger and Prucha (2010)
and Drukker, Prucha and Raciborski (2010).
vm : boolean
If True, include variance-covariance matrix in summary
results
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print() function)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
e_pred : array
nx1 array of residuals (using reduced form)
predy : array
nx1 array of predicted y values
predy_e : array
nx1 array of predicted y values (using reduced form)
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable used as instruments
z : array
nxk array of variables (combination of x and yend)
h : array
nxl array of instruments (combination of x and q)
iter_stop : string
Stop criterion reached during iteration of steps 2a and 2b
from Arraiz et al.
iteration : integer
Number of iterations of steps 2a and 2b from Arraiz et al.
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
pr2_e : float
Pseudo R squared (squared correlation between y and ypred_e
(using reduced form))
sig2 : float
Sigma squared used in computations (based on filtered
residuals)
std_err : array
1xk array of standard errors of the betas
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_z : list of strings
Names of exogenous and endogenous variables for use in
output
name_q : list of strings
Names of external instruments
name_h : list of strings
Names of all instruments used in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
hth : float
H'H
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import numpy as np
>>> import pysal.lib
Open data on Columbus neighborhood crime (49 areas) using pysal.lib.io.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.lib.io.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.lib.io.open(pysal.lib.examples.get_path('columbus.dbf'),'r')
Extract the HOVAL column (home values) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
a numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y = np.array(db.by_col("HOVAL"))
>>> y = np.reshape(y, (49,1))
Extract INC (income) vector from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this class adds a vector of ones to the
independent variables passed in.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
Since we want to run a spatial error model, we need to specify the spatial
weights matrix that incorporates the spatial configuration of the observations
into the error component of the model. To do that, we can open an already
existing gal file or create a new one. In this case, we will create one
from ``columbus.shp``.
>>> w = pysal.lib.weights.Rook.from_shapefile(pysal.lib.examples.get_path("columbus.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, this allows us to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
Example only with spatial lag
The Combo class runs a SARAR model, that is, a spatial lag+error model.
In this case we will run a simple version of that, where we have the
spatial effects as well as exogenous variables. Since it is a spatial
model, we have to pass in the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> reg = GM_Combo_Hom(y, X, w=w, A1='hom_sc', name_x=['inc'],\
name_y='hoval', name_yend=['crime'], name_q=['discbd'],\
name_ds='columbus')
>>> print(np.around(np.hstack((reg.betas, np.sqrt(reg.vm.diagonal()).reshape(4,1))), 4))
[[ 10.1254 15.2871]
[ 1.5683 0.4407]
[ 0.1513 0.4048]
[ 0.2103 0.4226]]
This class also allows the user to run a spatial lag+error model with the
extra feature of including non-spatial endogenous regressors. This means
that, in addition to the spatial lag and error, we consider some of the
variables on the right-hand side of the equation as endogenous and we
instrument for this. As an example, we will include CRIME (crime rates) as
endogenous and will instrument with DISCBD (distance to the CBD). We first
need to read in the variables:
>>> yd = []
>>> yd.append(db.by_col("CRIME"))
>>> yd = np.array(yd).T
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
And then we can run and explore the model analogously to the previous combo:
>>> reg = GM_Combo_Hom(y, X, yd, q, w=w, A1='hom_sc', \
name_ds='columbus')
>>> betas = np.array([['CONSTANT'],['inc'],['crime'],['W_hoval'],['lambda']])
>>> print(np.hstack((betas, np.around(np.hstack((reg.betas, np.sqrt(reg.vm.diagonal()).reshape(5,1))), 5))))
[['CONSTANT' '111.7705' '67.75191']
['inc' '-0.30974' '1.16656']
['crime' '-1.36043' '0.6841']
['W_hoval' '-0.52908' '0.84428']
['lambda' '0.60116' '0.18605']]
'''
def __init__(self, y, x, yend=None, q=None,
w=None, w_lags=1, lag_q=True,
max_iter=1, epsilon=0.00001, A1='hom_sc',
vm=False, name_y=None, name_x=None,
name_yend=None, name_q=None,
name_w=None, name_ds=None):
n = USER.check_arrays(y, x, yend, q)
USER.check_y(y, n)
USER.check_weights(w, y, w_required=True)
yend2, q2 = set_endog(y, x, w, yend, q, w_lags, lag_q)
x_constant = USER.check_constant(x)
BaseGM_Combo_Hom.__init__(
self, y=y, x=x_constant, w=w.sparse, yend=yend2, q=q2,
w_lags=w_lags, A1=A1, lag_q=lag_q,
max_iter=max_iter, epsilon=epsilon)
self.rho = self.betas[-2]
self.predy_e, self.e_pred, warn = sp_att(w, self.y, self.predy,
yend2[:, -1].reshape(self.n, 1), self.rho)
set_warn(self, warn)
self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES (HOM)"
self.name_ds = USER.set_name_ds(name_ds)
self.name_y = USER.set_name_y(name_y)
self.name_x = USER.set_name_x(name_x, x)
self.name_yend = USER.set_name_yend(name_yend, yend)
self.name_yend.append(USER.set_name_yend_sp(self.name_y))
self.name_z = self.name_x + self.name_yend
self.name_z.append('lambda') # listing lambda last
self.name_q = USER.set_name_q(name_q, q)
self.name_q.extend(
USER.set_name_q_sp(self.name_x, w_lags, self.name_q, lag_q))
self.name_h = USER.set_name_h(self.name_x, self.name_q)
self.name_w = USER.set_name_w(name_w, w)
SUMMARY.GM_Combo_Hom(reg=self, w=w, vm=vm)
# Functions
def moments_hom(w, wA1, wA2, u):
'''
Compute G and g matrices for the spatial error model with homoskedasticity
as in Anselin (2011) :cite:`Anselin2011`.
...
Parameters
----------
w : Sparse matrix
Spatial weights sparse matrix
wA1 : Sparse matrix
Sparse matrix of moment matrix A1, built from the weights
wA2 : Sparse matrix
Sparse matrix of moment matrix A2, built from the weights
u : array
Residuals. nx1 array assumed to be aligned with w
Returns
-------
moments : list
List of two arrays corresponding to the matrices 'G' and
'g', respectively.
'''
n = w.shape[0]
A1u = wA1 * u
A2u = wA2 * u
wu = w * u
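    # g stacks the normalized quadratic forms u'A1u and u'A2u; the G entries
    # below combine the spatially lagged residuals Wu with the same A matrices.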
g1 = np.dot(u.T, A1u)
g2 = np.dot(u.T, A2u)
g = np.array([[g1][0][0], [g2][0][0]]) / n
G11 = 2 * np.dot(wu.T * wA1, u)
G12 = -np.dot(wu.T * wA1, wu)
G21 = 2 * np.dot(wu.T * wA2, u)
G22 = -np.dot(wu.T * wA2, wu)
G = np.array([[G11[0][0], G12[0][0]], [G21[0][0], G22[0][0]]]) / n
return [G, g]
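# A minimal sketch (not part of the original module) of how the G and g
# produced by moments_hom enter the GMM step: lambda is chosen to bring the
# stacked moment conditions g - G [lambda, lambda**2]' as close to zero as
# possible. The helper name below is hypothetical and for illustration only;
# the package's own optimization utilities perform the actual minimization.
def _hom_moment_objective(lambdapar, G, g):
    # Residual of the two moment equations evaluated at lambdapar
    par = np.array([[float(lambdapar)], [float(lambdapar) ** 2]])
    vv = np.dot(G, par) - g
    # Sum of squared deviations from the moment conditions
    return float(np.dot(vv.T, vv))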
def get_vc_hom(w, wA1, wA2, reg, lambdapar, z_s=None, for_omegaOLS=False):
'''
VC matrix psi of the spatial error model with homoskedasticity, as in
Anselin (2011) :cite:`Anselin2011` (p. 20).
...
Parameters
----------
w : Sparse matrix
Spatial weights sparse matrix
wA1 : Sparse matrix
Sparse matrix of moment matrix A1, built from the weights
wA2 : Sparse matrix
Sparse matrix of moment matrix A2, built from the weights
reg : reg
Regression object
lambdapar : float
Spatial parameter estimated in previous step of the
procedure
z_s : array
Optional argument for spatially filtered Z (to be
passed only if endogenous variables are present)
for_omegaOLS : boolean
If True (default=False), it also returns P, needed
only in the computation of Omega
Returns
-------
psi : array
2x2 VC matrix
a1 : array
nx1 vector a1. If z_s=None, a1 = 0.
a2 : array
nx1 vector a2. If z_s=None, a2 = 0.
p : array
P matrix. If z_s=None and for_omegaOLS=False, p=0.
'''
u_s = get_spFilter(w, lambdapar, reg.u)
n = float(w.shape[0])
sig2 = np.dot(u_s.T, u_s) / n
mu3 = np.sum(u_s ** 3) / n
mu4 = np.sum(u_s ** 4) / n
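    # Traces of products of the A1/A2 matrices; they feed the entries of the
    # 2x2 psi matrix assembled below.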
tr11 = wA1 * wA1
tr11 = np.sum(tr11.diagonal())
tr12 = wA1 * (wA2 * 2)
tr12 = np.sum(tr12.diagonal())
tr22 = wA2 * wA2 * 2
tr22 = np.sum(tr22.diagonal())
vecd1 = np.array([wA1.diagonal()]).T
psi11 = 2 * sig2 ** 2 * tr11 + \
(mu4 - 3 * sig2 ** 2) * np.dot(vecd1.T, vecd1)
psi12 = sig2 ** 2 * tr12
psi22 = sig2 ** 2 * tr22
a1, a2, p = 0., 0., 0.
if for_omegaOLS:
x_s = get_spFilter(w, lambdapar, reg.x)
p = la.inv(spdot(x_s.T, x_s) / n)
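    # With endogenous variables present (z_s passed as an array), add the
    # a1/a2 correction terms that account for the estimated parameters.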
if issubclass(type(z_s), np.ndarray) or \
issubclass(type(z_s), SP.csr.csr_matrix) or \
issubclass(type(z_s), SP.csc.csc_matrix):
alpha1 = (-2 / n) * spdot(z_s.T, wA1 * u_s)
alpha2 = (-2 / n) * spdot(z_s.T, wA2 * u_s)
hth = spdot(reg.h.T, reg.h)
hthni = la.inv(hth / n)
htzsn = spdot(reg.h.T, z_s) / n
p = spdot(hthni, htzsn)
p = spdot(p, la.inv(spdot(htzsn.T, p)))
hp = spdot(reg.h, p)
a1 = spdot(hp, alpha1)
a2 = spdot(hp, alpha2)
psi11 = psi11 + \
sig2 * spdot(a1.T, a1) + \
2 * mu3 * spdot(a1.T, vecd1)
psi12 = psi12 + \
sig2 * spdot(a1.T, a2) + \
mu3 * spdot(a2.T, vecd1) # 3rd term=0
psi22 = psi22 + \
sig2 * spdot(a2.T, a2) # 3rd&4th terms=0 bc vecd2=0
psi = np.array(
[[psi11[0][0], psi12[0][0]], [psi12[0][0], psi22[0][0]]]) / n
return psi, a1, a2, p
def get_omega_hom(w, wA1, wA2, reg, lamb, G):
'''
Omega VC matrix for Hom models with endogenous variables, computed as in
Anselin (2011) :cite:`Anselin2011` (p. 21).
...
Parameters
----------
w : Sparse matrix
Spatial weights sparse matrix
wA1 : Sparse matrix
Sparse matrix of moment matrix A1, built from the weights
wA2 : Sparse matrix
Sparse matrix of moment matrix A2, built from the weights
reg : reg
Regression object
lamb : float
Spatial parameter estimated in previous step of the
procedure
G : array
Matrix 'G' of the moment equation
Returns
-------
omega : array
Omega matrix of VC of the model
sig2 : float
Sigma squared based on the spatially filtered residuals
'''
n = float(w.shape[0])
z_s = get_spFilter(w, lamb, reg.z)
u_s = get_spFilter(w, lamb, reg.u)
sig2 = np.dot(u_s.T, u_s) / n
mu3 = np.sum(u_s ** 3) / n
vecdA1 = np.array([wA1.diagonal()]).T
psi, a1, a2, p = get_vc_hom(w, wA1, wA2, reg, lamb, z_s)
j = np.dot(G, np.array([[1.], [2 * lamb]]))
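    # j is the Jacobian of the moment conditions G [lambda, lambda**2]' with
    # respect to lambda, i.e. G [1, 2*lambda]'.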
psii = la.inv(psi)
t2 = spdot(reg.h.T, np.hstack((a1, a2)))
psiDL = (mu3 * spdot(reg.h.T, np.hstack((vecdA1, np.zeros((int(n), 1))))) +
sig2 * spdot(reg.h.T, np.hstack((a1, a2)))) / n
oDD = spdot(la.inv(spdot(reg.h.T, reg.h)), spdot(reg.h.T, z_s))
oDD = sig2 * la.inv(spdot(z_s.T, spdot(reg.h, oDD)))
oLL = la.inv(spdot(j.T, spdot(psii, j))) / n
oDL = spdot(spdot(spdot(p.T, psiDL), spdot(psii, j)), oLL)
o_upper = np.hstack((oDD, oDL))
o_lower = np.hstack((oDL.T, oLL))
return np.vstack((o_upper, o_lower)), float(sig2)
def get_omega_hom_ols(w, wA1, wA2, reg, lamb, G):
'''
Omega VC matrix for Hom models without endogenous variables (OLS), computed
as in Anselin (2011) :cite:`Anselin2011`.
...
Parameters
----------
w : Sparse matrix
Spatial weights sparse matrix
wA1 : Sparse matrix
Sparse matrix of moment matrix A1, built from the weights
wA2 : Sparse matrix
Sparse matrix of moment matrix A2, built from the weights
reg : reg
Regression object
lamb : float
Spatial parameter estimated in previous step of the
procedure
G : array
Matrix 'G' of the moment equation
Returns
-------
omega : array
Omega matrix of VC of the model
sig2 : float
Sigma squared based on the spatially filtered residuals
'''
n = float(w.shape[0])
x_s = get_spFilter(w, lamb, reg.x)
u_s = get_spFilter(w, lamb, reg.u)
sig2 = np.dot(u_s.T, u_s) / n
vecdA1 = np.array([wA1.diagonal()]).T
psi, a1, a2, p = get_vc_hom(w, wA1, wA2, reg, lamb, for_omegaOLS=True)
j = np.dot(G, np.array([[1.], [2 * lamb]]))
psii = la.inv(psi)
oDD = sig2 * la.inv(spdot(x_s.T, x_s))
oLL = la.inv(spdot(j.T, spdot(psii, j))) / n
#oDL = np.zeros((oDD.shape[0], oLL.shape[1]))
mu3 = np.sum(u_s ** 3) / n
psiDL = (mu3 * spdot(reg.x.T, np.hstack((vecdA1, np.zeros((int(n), 1)))))) / n
oDL = spdot(spdot(spdot(p.T, psiDL), spdot(psii, j)), oLL)
o_upper = np.hstack((oDD, oDL))
o_lower = np.hstack((oDL.T, oLL))
return np.vstack((o_upper, o_lower)), float(sig2)
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
_test()
| 40.529289 | 155 | 0.568988 | 7,888 | 58,119 | 4.111435 | 0.077206 | 0.008634 | 0.012827 | 0.013814 | 0.86442 | 0.85483 | 0.844871 | 0.834048 | 0.829145 | 0.825907 | 0 | 0.035211 | 0.345206 | 58,119 | 1,433 | 156 | 40.557572 | 0.81698 | 0.720315 | 0 | 0.498155 | 0 | 0 | 0.027382 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04059 | false | 0 | 0.04797 | 0 | 0.125461 | 0.01107 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
97fdc716b5841524f375da88234fb01887712ac0 | 72 | py | Python | chalice_jwt/utils.py | marktennyson/chalice-jwt | 96a95a3130c3c734ea6c1085405ff06b3e3aef6f | [
"MIT"
] | 3 | 2021-04-12T13:30:20.000Z | 2022-02-13T16:02:57.000Z | chalice_jwt/utils.py | marktennyson/chalice-jwt | 96a95a3130c3c734ea6c1085405ff06b3e3aef6f | [
"MIT"
] | null | null | null | chalice_jwt/utils.py | marktennyson/chalice-jwt | 96a95a3130c3c734ea6c1085405ff06b3e3aef6f | [
"MIT"
] | 1 | 2021-05-12T12:01:28.000Z | 2021-05-12T12:01:28.000Z | from json import dumps
def _jsonify(**kwargs):
return dumps(kwargs) | 18 | 24 | 0.736111 | 10 | 72 | 5.2 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 72 | 4 | 24 | 18 | 0.866667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 6 |
3f08aac28ffe1879adab51d57072a28d51dac2bb | 125 | py | Python | get_env_data.py | BEisem/PlexTraktSync | 2a1ec95bcccb20a20afd08cdc3bd396019083439 | [
"MIT"
] | null | null | null | get_env_data.py | BEisem/PlexTraktSync | 2a1ec95bcccb20a20afd08cdc3bd396019083439 | [
"MIT"
] | null | null | null | get_env_data.py | BEisem/PlexTraktSync | 2a1ec95bcccb20a20afd08cdc3bd396019083439 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from plex_trakt_sync.get_env_data import get_env_data
if __name__ == "__main__":
get_env_data()
| 17.857143 | 53 | 0.76 | 21 | 125 | 3.761905 | 0.666667 | 0.227848 | 0.379747 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009259 | 0.136 | 125 | 6 | 54 | 20.833333 | 0.722222 | 0.168 | 0 | 0 | 0 | 0 | 0.07767 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 6 |
3f3d5d89c0d71c8d41af50783867903978e82791 | 18,557 | py | Python | unit/test_get_needs_restart.py | FizikRoot/ansible-cartridge | ad06411ec701b68fbf5b8ed5e184a47ffb0ac70f | [
"BSD-2-Clause"
] | 17 | 2019-09-02T15:31:56.000Z | 2022-03-29T18:49:59.000Z | unit/test_get_needs_restart.py | FizikRoot/ansible-cartridge | ad06411ec701b68fbf5b8ed5e184a47ffb0ac70f | [
"BSD-2-Clause"
] | 171 | 2019-10-24T15:34:34.000Z | 2022-03-29T09:18:46.000Z | unit/test_get_needs_restart.py | FizikRoot/ansible-cartridge | ad06411ec701b68fbf5b8ed5e184a47ffb0ac70f | [
"BSD-2-Clause"
] | 14 | 2019-12-23T08:27:06.000Z | 2021-07-06T15:53:49.000Z | import itertools
import sys
import unittest
from parameterized import parameterized
import module_utils.helpers as helpers
from unit.instance import Instance
sys.modules['ansible.module_utils.helpers'] = helpers
from library.cartridge_get_needs_restart import set_needs_restart
def call_needs_restart(
console_sock,
app_name=Instance.APP_NAME,
instance_conf_file=Instance.INSTANCE_CONF_PATH,
app_conf_file=Instance.APP_CONF_PATH,
instance_dist_dir=Instance.APP_CODE_PATH,
instance_id=Instance.instance_id,
config=None,
cluster_cookie=Instance.COOKIE,
cartridge_not_save_cookie_in_app_config=False,
cartridge_defaults=None,
stateboard=False,
check_package_updated=False,
check_config_updated=False,
keys_to_remove=None,
):
instance_info = {
'console_sock': console_sock,
'app_conf_file': app_conf_file,
'conf_file': instance_conf_file,
'instance_id': instance_id,
'instance_dist_dir': instance_dist_dir,
}
params = {
'app_name': app_name,
'config': config or {},
'cartridge_defaults': cartridge_defaults or {},
'cluster_cookie': cluster_cookie,
'cartridge_not_save_cookie_in_app_config': cartridge_not_save_cookie_in_app_config,
'stateboard': stateboard,
'instance_info': instance_info,
'check_package_updated': check_package_updated,
'check_config_updated': check_config_updated,
}
if keys_to_remove:
for key in keys_to_remove:
del params[key]
return set_needs_restart(params)
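# For example (a sketch using the helper above, with `sock` standing in for a
# live console socket path), a test can assert that a missing required key
# makes the module fail fast:
#
#   res = call_needs_restart(console_sock=sock, check_config_updated=True,
#                            keys_to_remove=['config'])
#   assert res.failed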
class TestGetNeedsRestart(unittest.TestCase):
def setUp(self):
self.instance = Instance()
self.console_sock = self.instance.console_sock
self.cookie = self.instance.cluster_cookie
self.instance.start()
def test_optional_fields(self):
for key in ['app_name', 'config', 'cartridge_defaults', 'stateboard']:
res = call_needs_restart(
console_sock=self.console_sock,
check_config_updated=True,
keys_to_remove=[key],
)
self.assertTrue(res.failed)
self.assertEqual(res.msg, "Argument '%s' is required to check for configuration updates" % key)
def test_cluster_cookie(self):
res = call_needs_restart(
console_sock=self.console_sock,
check_config_updated=True,
cartridge_not_save_cookie_in_app_config=True,
cluster_cookie=None,
)
self.assertFalse(res.failed)
res = call_needs_restart(
console_sock=self.console_sock,
check_config_updated=True,
cartridge_not_save_cookie_in_app_config=False,
cluster_cookie=None,
)
self.assertTrue(res.failed)
self.assertEqual(
res.msg,
"'cartridge_cluster_cookie' should be set to check for configuration "
"updates when 'cartridge_not_save_cookie_in_app_config' is false"
)
# cookie isn't in config
self.instance.set_app_config({}, set_cookie=False)
res = call_needs_restart(
console_sock=self.console_sock,
check_config_updated=True,
cartridge_not_save_cookie_in_app_config=True,
cluster_cookie="some-new-cookie",
)
self.assertFalse(res.failed, res.msg)
self.assertFalse(res.fact)
# cookie was in config, but now it isn't
self.instance.set_app_config({})
res = call_needs_restart(
console_sock=self.console_sock,
check_config_updated=True,
cartridge_not_save_cookie_in_app_config=True,
cluster_cookie="some-new-cookie",
)
self.assertFalse(res.failed, res.msg)
self.assertTrue(res.fact)
# cookie is in config and it changed
self.instance.set_app_config({})
res = call_needs_restart(
console_sock=self.console_sock,
check_config_updated=True,
cartridge_not_save_cookie_in_app_config=False,
cluster_cookie="some-new-cookie",
)
self.assertFalse(res.failed, res.msg)
self.assertTrue(res.fact)
# cookie wasn't in config, but now it is
self.instance.set_app_config({}, set_cookie=False)
res = call_needs_restart(
console_sock=self.console_sock,
check_config_updated=True,
cartridge_not_save_cookie_in_app_config=False,
cluster_cookie=self.instance.COOKIE,
)
self.assertFalse(res.failed, res.msg)
self.assertTrue(res.fact)
def test_instance_not_running(self):
# console sock doesn't exist
self.instance.remove_file(self.console_sock)
res = call_needs_restart(
console_sock=self.console_sock
)
self.assertFalse(res.failed, msg=res.msg)
self.assertTrue(res.changed)
self.assertTrue(res.fact)
# cannot connect to console sock
bad_socket_path = 'bad-socket-path'
self.instance.write_file(bad_socket_path)
res = call_needs_restart(
console_sock=bad_socket_path
)
self.assertFalse(res.failed, msg=res.msg)
self.assertTrue(res.changed)
self.assertTrue(res.fact)
def test_box_cfg_is_function(self):
param_name = 'some-param'
old_value = 'old-value'
new_value = 'new-value'
self.instance.set_box_cfg_function()
self.instance.set_instance_config({
param_name: old_value,
})
# no check
res = call_needs_restart(
console_sock=self.console_sock,
config={
param_name: old_value,
},
)
self.assertFalse(res.failed, msg=res.msg)
self.assertFalse(res.changed)
# nothing changed
res = call_needs_restart(
console_sock=self.console_sock,
config={
param_name: old_value,
},
check_config_updated=True,
)
self.assertFalse(res.failed, msg=res.msg)
self.assertTrue(res.changed)
self.assertTrue(res.fact)
# param was changed
res = call_needs_restart(
console_sock=self.console_sock,
config={
param_name: new_value,
},
check_config_updated=True,
)
self.assertFalse(res.failed, msg=res.msg)
self.assertTrue(res.changed)
self.assertTrue(res.fact)
def test_code_was_updated(self):
# code was updated yesterday, socket today - restart isn't needed
self.instance.set_path_m_time(self.instance.APP_CODE_PATH, self.instance.DATE_YESTERDAY)
self.instance.set_path_m_time(self.console_sock, self.instance.DATE_TODAY)
res = call_needs_restart(console_sock=self.console_sock, check_package_updated=True)
self.assertFalse(res.failed, msg=res.msg)
self.assertFalse(res.changed)
self.assertFalse(res.fact)
# code was updated today, socket yesterday - needs restart
self.instance.set_path_m_time(self.instance.APP_CODE_PATH, self.instance.DATE_TODAY)
self.instance.set_path_m_time(self.console_sock, self.instance.DATE_YESTERDAY)
# no check
res = call_needs_restart(console_sock=self.console_sock)
self.assertFalse(res.failed, msg=res.msg)
self.assertFalse(res.changed)
res = call_needs_restart(console_sock=self.console_sock, check_package_updated=True)
self.assertFalse(res.failed, msg=res.msg)
self.assertTrue(res.changed)
self.assertTrue(res.fact)
@parameterized.expand(
itertools.product(
["instance", "stateboard"],
["memtx_memory", "vinyl_memory"],
)
)
def test_config_changed(self, instance_type, memory_param_name):
param_name = 'param'
param_current_value = 'current-value'
param_new_value = 'new-value'
current_memory_size = 100
memtx_memory_new_value = 200
stateboard = instance_type == 'stateboard'
self.instance.set_instance_config({
param_name: param_current_value,
memory_param_name: current_memory_size
})
self.instance.set_box_cfg(**{memory_param_name: current_memory_size})
# nothing changed
res = call_needs_restart(
console_sock=self.console_sock,
config={
param_name: param_current_value,
memory_param_name: current_memory_size
},
stateboard=stateboard,
check_config_updated=True,
)
self.assertFalse(res.failed, msg=res.msg)
self.assertFalse(res.changed)
self.assertFalse(res.fact)
# param changed, memory size not
res = call_needs_restart(
console_sock=self.console_sock,
config={
param_name: param_new_value,
memory_param_name: current_memory_size
},
stateboard=stateboard,
check_config_updated=True,
)
self.assertFalse(res.failed, msg=res.msg)
self.assertTrue(res.changed)
self.assertTrue(res.fact)
# param isn't changed
# memory size is changed in config
# but isn't changed on instance
self.instance.set_box_cfg(**{memory_param_name: current_memory_size})
res = call_needs_restart(
console_sock=self.console_sock,
config={
param_name: param_current_value,
memory_param_name: memtx_memory_new_value
},
stateboard=stateboard,
check_config_updated=True,
)
self.assertFalse(res.failed, msg=res.msg)
self.assertTrue(res.changed)
self.assertTrue(res.fact)
# param isn't changed
# memory size is changed in config
# and changed on instance
self.instance.set_box_cfg(**{memory_param_name: memtx_memory_new_value})
res = call_needs_restart(
console_sock=self.console_sock,
config={
param_name: param_current_value,
memory_param_name: memtx_memory_new_value
},
stateboard=stateboard,
check_config_updated=True,
)
self.assertFalse(res.failed, msg=res.msg)
self.assertFalse(res.changed)
self.assertFalse(res.fact)
# param is changed
# memory size is changed in config
# and changed on instance
self.instance.set_box_cfg(**{memory_param_name: memtx_memory_new_value})
res = call_needs_restart(
console_sock=self.console_sock,
config={
param_name: param_new_value,
memory_param_name: memtx_memory_new_value
},
stateboard=stateboard,
check_config_updated=True,
)
self.assertFalse(res.failed, msg=res.msg)
self.assertTrue(res.changed)
self.assertTrue(res.fact)
@parameterized.expand(
itertools.product(
["instance", "stateboard"],
["memtx_memory", "vinyl_memory"],
)
)
def test_app_config_changed(self, instance_type, memory_param_name):
param_name = 'param'
param_current_value = 'current-value'
param_new_value = 'new-value'
current_memory_size = 100
memtx_memory_new_value = 200
stateboard = instance_type == 'stateboard'
self.instance.set_app_config({
param_name: param_current_value,
memory_param_name: current_memory_size
})
self.instance.set_box_cfg(**{memory_param_name: current_memory_size})
# nothing changed
res = call_needs_restart(
console_sock=self.console_sock,
cartridge_defaults={
param_name: param_current_value,
memory_param_name: current_memory_size
},
stateboard=stateboard,
check_config_updated=True,
)
self.assertFalse(res.failed, msg=res.msg)
self.assertFalse(res.changed)
self.assertFalse(res.fact)
# param changed, memory size not
res = call_needs_restart(
console_sock=self.console_sock,
cartridge_defaults={
param_name: param_new_value,
memory_param_name: current_memory_size
},
stateboard=stateboard,
check_config_updated=True,
)
self.assertFalse(res.failed, msg=res.msg)
if not stateboard:
self.assertTrue(res.changed)
self.assertTrue(res.fact)
else:
self.assertFalse(res.changed)
self.assertFalse(res.fact)
# param isn't changed
# memory size is changed in config
# but isn't changed on instance
self.instance.set_box_cfg(**{memory_param_name: current_memory_size})
res = call_needs_restart(
console_sock=self.console_sock,
cartridge_defaults={
param_name: param_current_value,
memory_param_name: memtx_memory_new_value
},
stateboard=stateboard,
check_config_updated=True,
)
self.assertFalse(res.failed, msg=res.msg)
if not stateboard:
self.assertTrue(res.changed)
self.assertTrue(res.fact)
else:
self.assertFalse(res.changed)
self.assertFalse(res.fact)
# param isn't changed
# memory size is changed in config
# and changed on instance
self.instance.set_box_cfg(**{memory_param_name: memtx_memory_new_value})
res = call_needs_restart(
console_sock=self.console_sock,
cartridge_defaults={
param_name: param_current_value,
memory_param_name: memtx_memory_new_value
},
stateboard=stateboard,
check_config_updated=True,
)
self.assertFalse(res.failed, msg=res.msg)
self.assertFalse(res.changed)
self.assertFalse(res.fact)
# param is changed
# memory size is changed in config
# and changed on instance
self.instance.set_box_cfg(**{memory_param_name: memtx_memory_new_value})
res = call_needs_restart(
console_sock=self.console_sock,
cartridge_defaults={
param_name: param_new_value,
memory_param_name: memtx_memory_new_value
},
stateboard=stateboard,
check_config_updated=True,
)
self.assertFalse(res.failed, msg=res.msg)
if not stateboard:
self.assertTrue(res.changed)
self.assertTrue(res.fact)
else:
self.assertFalse(res.changed)
self.assertFalse(res.fact)
@parameterized.expand([
["memtx_memory"],
["vinyl_memory"],
])
def test_memory_size_changed(self, memory_param_name):
current_memory_size = 100
new_memory_size_instance = 200
new_memory_size_app = 300
self.instance.set_app_config({
memory_param_name: current_memory_size
})
self.instance.set_instance_config({
memory_param_name: current_memory_size
})
self.instance.set_box_cfg(**{memory_param_name: current_memory_size})
# nothing changed
res = call_needs_restart(
console_sock=self.console_sock,
config={
memory_param_name: current_memory_size
},
cartridge_defaults={
memory_param_name: current_memory_size
},
check_config_updated=True,
)
self.assertFalse(res.failed, msg=res.msg)
self.assertFalse(res.changed)
self.assertFalse(res.fact)
# memory size changed only in cartridge_defaults
res = call_needs_restart(
console_sock=self.console_sock,
config={
memory_param_name: current_memory_size
},
cartridge_defaults={
memory_param_name: new_memory_size_instance
},
check_config_updated=True,
)
self.assertFalse(res.failed, msg=res.msg)
self.assertFalse(res.changed)
self.assertFalse(res.fact)
# memory size changed both in cartridge_defaults and config
res = call_needs_restart(
console_sock=self.console_sock,
config={
memory_param_name: new_memory_size_instance
},
cartridge_defaults={
memory_param_name: new_memory_size_app
},
check_config_updated=True,
)
self.assertFalse(res.failed, msg=res.msg)
self.assertTrue(res.changed)
self.assertTrue(res.fact)
# memory size changed both in cartridge_defaults and config
# memory size on instance is equal to value from cartridge_defaults
self.instance.set_box_cfg(**{memory_param_name: new_memory_size_app})
res = call_needs_restart(
console_sock=self.console_sock,
config={
memory_param_name: new_memory_size_instance
},
cartridge_defaults={
memory_param_name: new_memory_size_app
},
check_config_updated=True,
)
self.assertFalse(res.failed, msg=res.msg)
self.assertTrue(res.changed)
self.assertTrue(res.fact)
# memory size changed both in cartridge_defaults and config
# memory size on instance is equal to value from config
self.instance.set_box_cfg(**{memory_param_name: new_memory_size_instance})
res = call_needs_restart(
console_sock=self.console_sock,
config={
memory_param_name: new_memory_size_instance
},
cartridge_defaults={
memory_param_name: new_memory_size_app
},
check_config_updated=True,
)
self.assertFalse(res.failed, msg=res.msg)
self.assertFalse(res.changed)
self.assertFalse(res.fact)
def tearDown(self):
self.instance.stop()
del self.instance
| 33.801457 | 107 | 0.620844 | 2,093 | 18,557 | 5.179169 | 0.064023 | 0.070018 | 0.088007 | 0.065775 | 0.826384 | 0.800646 | 0.790037 | 0.777952 | 0.760978 | 0.760978 | 0 | 0.001619 | 0.300803 | 18,557 | 548 | 108 | 33.863139 | 0.833834 | 0.0693 | 0 | 0.664414 | 0 | 0 | 0.043067 | 0.008996 | 0 | 0 | 0 | 0 | 0.193694 | 1 | 0.024775 | false | 0 | 0.015766 | 0 | 0.045045 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
3f6d98eae3e15e64e42e4db494f92e4b4bbefe16 | 6,112 | py | Python | examples/pyaos8/pools.py | michaelrosejr/pyaos8 | 2fc7c241692bad7bd1a5e25c87cd65d5830a9dd5 | [
"Apache-2.0"
] | 2 | 2019-07-31T07:35:47.000Z | 2020-01-10T15:45:48.000Z | examples/pyaos8/pools.py | michaelrosejr/pyaos8 | 2fc7c241692bad7bd1a5e25c87cd65d5830a9dd5 | [
"Apache-2.0"
] | null | null | null | examples/pyaos8/pools.py | michaelrosejr/pyaos8 | 2fc7c241692bad7bd1a5e25c87cd65d5830a9dd5 | [
"Apache-2.0"
] | 2 | 2018-11-17T04:33:35.000Z | 2020-09-09T16:08:34.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests, json
import sys
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# from aosget import aosget
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def aosget(url, auth):
aoscookie = dict(SESSION = auth.uidaruba)
try:
r = requests.get(url, cookies=aoscookie, verify=False)
if r.status_code != 200:
print('Status:', r.status_code, 'Headers:', r.headers,
'Error Response:', r.reason)
return r.text
except requests.exceptions.RequestException as error:
#print("Error")
return "Error:\n" + str(error) + sys._getframe().f_code.co_name + ": An Error has occured"
def aosput(url, auth, payload):
    aoscookie = dict(SESSION=auth.uidaruba)
    try:
        r = requests.post(url, cookies=aoscookie, data=payload, verify=False)
        if r.status_code != 200:
            print('Status:', r.status_code, 'Headers:', r.headers,
                  'Error Response:', r.reason)
        response_text = r.text
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " aosput: An Error has occurred"
    # Persist the posted change on the controller with write_memory before
    # returning the response from the configuration POST.
    url_write = "https://" + auth.aos8ip + ":4343/v1/configuration/object/write_memory?json=1&UIDARUBA=" + auth.uidaruba
    try:
        r = requests.post(url_write, cookies=aoscookie, verify=False)
        if r.status_code != 200:
            print('Status:', r.status_code, 'Headers:', r.headers,
                  'Error Response:', r.reason)
        return response_text
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " url_write: An Error has occurred"
class pools():
def get_ipv6_dhcp_excld_addr_cfg(auth):
url = "https://" + auth.aos8ip + ":4343/v1/configuration/object/" \
"ipv6_dhcp_excld_addr_cfg?json=1&UIDARUBA=" + auth.uidaruba
response = aosget(url, auth)
return response
def get_ip_dhcp_excld_addr_cfg(auth):
url = "https://" + auth.aos8ip + ":4343/v1/configuration/object/" \
"ip_dhcp_excld_addr_cfg?json=1&UIDARUBA=" + auth.uidaruba
response = aosget(url, auth)
return response
def get_nat_pool(auth):
url = "https://" + auth.aos8ip + ":4343/v1/configuration/object/" \
"nat_pool?json=1&UIDARUBA=" + auth.uidaruba
response = aosget(url, auth)
return response
def get_ip_dhcp_adaptive(auth):
url = "https://" + auth.aos8ip + ":4343/v1/configuration/object/" \
"ip_dhcp_adaptive?json=1&UIDARUBA=" + auth.uidaruba
response = aosget(url, auth)
return response
def get_srv_dhcp_cfg(auth):
url = "https://" + auth.aos8ip + ":4343/v1/configuration/object/" \
"srv_dhcp_cfg?json=1&UIDARUBA=" + auth.uidaruba
response = aosget(url, auth)
return response
def get_ip_dhcp_opt82_web(auth):
url = "https://" + auth.aos8ip + ":4343/v1/configuration/object/" \
"ip_dhcp_opt82_web?json=1&UIDARUBA=" + auth.uidaruba
response = aosget(url, auth)
return response
def get_ip_dhcp_dfl_pool_cfg(auth):
url = "https://" + auth.aos8ip + ":4343/v1/configuration/object/" \
"ip_dhcp_dfl_pool_cfg?json=1&UIDARUBA=" + auth.uidaruba
response = aosget(url, auth)
return response
def get_ip_dhcp_pool_cfg(auth):
url = "https://" + auth.aos8ip + ":4343/v1/configuration/object/" \
"ip_dhcp_pool_cfg?json=1&UIDARUBA=" + auth.uidaruba
response = aosget(url, auth)
return response
def get_srv_dhcpv6_cfg(auth):
url = "https://" + auth.aos8ip + ":4343/v1/configuration/object/" \
"srv_dhcpv6_cfg?json=1&UIDARUBA=" + auth.uidaruba
response = aosget(url, auth)
return response
def get_l2tp_local_pool_ipv6(auth):
url = "https://" + auth.aos8ip + ":4343/v1/configuration/object/" \
"l2tp_local_pool_ipv6?json=1&UIDARUBA=" + auth.uidaruba
response = aosget(url, auth)
return response
def get_ipv6_dhcp_pool_cfg(auth):
url = "https://" + auth.aos8ip + ":4343/v1/configuration/object/" \
"ipv6_dhcp_pool_cfg?json=1&UIDARUBA=" + auth.uidaruba
response = aosget(url, auth)
return response
def get_ip_dhcp_opt82(auth):
url = "https://" + auth.aos8ip + ":4343/v1/configuration/object/" \
"ip_dhcp_opt82?json=1&UIDARUBA=" + auth.uidaruba
response = aosget(url, auth)
return response
def get_tun_pool(auth):
url = "https://" + auth.aos8ip + ":4343/v1/configuration/object/" \
"tun_pool?json=1&UIDARUBA=" + auth.uidaruba
response = aosget(url, auth)
return response
def get_vlan_pool(auth):
url = "https://" + auth.aos8ip + ":4343/v1/configuration/object/" \
"vlan_pool?json=1&UIDARUBA=" + auth.uidaruba
response = aosget(url, auth)
return response
def get_l2tp_local_pool(auth):
url = "https://" + auth.aos8ip + ":4343/v1/configuration/object/" \
"l2tp_local_pool?json=1&UIDARUBA=" + auth.uidaruba
response = aosget(url, auth)
return response
def get_ip_dhcp_lb_cfg(auth):
url = "https://" + auth.aos8ip + ":4343/v1/configuration/object/" \
"ip_dhcp_lb_cfg?json=1&UIDARUBA=" + auth.uidaruba
response = aosget(url, auth)
return response
def get_pptp_local_pool(auth):
url = "https://" + auth.aos8ip + ":4343/v1/configuration/object/" \
"pptp_local_pool?json=1&UIDARUBA=" + auth.uidaruba
response = aosget(url, auth)
return response
def get_ip_dhcp_ping_check_cfg(auth):
url = "https://" + auth.aos8ip + ":4343/v1/configuration/object/" \
"ip_dhcp_ping_check_cfg?json=1&UIDARUBA=" + auth.uidaruba
response = aosget(url, auth)
return response
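# Illustrative usage of the GET wrappers (a sketch, not part of the original
# module): `session_auth` is assumed to be an object exposing `aos8ip` (the
# controller address) and `uidaruba` (the REST session token) obtained from a
# prior login call.
#
#   print(pools.get_vlan_pool(session_auth))
#   print(pools.get_nat_pool(session_auth))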
| 34.925714 | 120 | 0.61502 | 741 | 6,112 | 4.904184 | 0.125506 | 0.069345 | 0.067969 | 0.09934 | 0.864887 | 0.853605 | 0.853605 | 0.832141 | 0.807925 | 0.807925 | 0 | 0.036129 | 0.252781 | 6,112 | 174 | 121 | 35.126437 | 0.75958 | 0.017997 | 0 | 0.596774 | 0 | 0 | 0.257672 | 0.198132 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16129 | false | 0 | 0.024194 | 0 | 0.387097 | 0.024194 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |
4540ac8e8f593022b07f94ad6435708166f39235 | 7,215 | py | Python | autolens/plot/plane_plots.py | PyJedi/PyAutoLens | bcfb2e7b447aa24508fc648d60b6fd9b4fd852e7 | [
"MIT"
] | 1 | 2020-04-06T20:07:56.000Z | 2020-04-06T20:07:56.000Z | autolens/plot/plane_plots.py | PyJedi/PyAutoLens | bcfb2e7b447aa24508fc648d60b6fd9b4fd852e7 | [
"MIT"
] | null | null | null | autolens/plot/plane_plots.py | PyJedi/PyAutoLens | bcfb2e7b447aa24508fc648d60b6fd9b4fd852e7 | [
"MIT"
] | null | null | null | from autoarray.plot import plotters
from autoastro.plot import lensing_plotters
@lensing_plotters.set_include_and_plotter
@plotters.set_labels
def profile_image(plane, grid, positions=None, include=None, plotter=None):
plotter.plot_array(
array=plane.profile_image_from_grid(grid=grid),
mask=include.mask_from_grid(grid=grid),
positions=positions,
critical_curves=include.critical_curves_from_obj(obj=plane),
light_profile_centres=include.light_profile_centres_of_galaxies_from_obj(
obj=plane
),
mass_profile_centres=include.mass_profile_centres_of_galaxies_from_obj(
obj=plane
),
include_origin=include.origin,
)
@lensing_plotters.set_include_and_plotter
@plotters.set_labels
def plane_image(plane, grid, positions=None, caustics=None, include=None, plotter=None):
plotter.plot_array(
array=plane.plane_image_from_grid(grid=grid).array,
positions=positions,
caustics=caustics,
grid=include.grid_from_grid(grid=grid),
light_profile_centres=include.light_profile_centres_of_galaxies_from_obj(
obj=plane
),
mass_profile_centres=include.mass_profile_centres_of_galaxies_from_obj(
obj=plane
),
include_origin=include.origin,
)
@lensing_plotters.set_include_and_plotter
@plotters.set_labels
def convergence(plane, grid, include=None, plotter=None):
plotter.plot_array(
array=plane.convergence_from_grid(grid=grid),
mask=include.mask_from_grid(grid=grid),
critical_curves=include.critical_curves_from_obj(obj=plane),
light_profile_centres=include.light_profile_centres_of_galaxies_from_obj(
obj=plane
),
mass_profile_centres=include.mass_profile_centres_of_galaxies_from_obj(
obj=plane
),
include_origin=include.origin,
)
@lensing_plotters.set_include_and_plotter
@plotters.set_labels
def potential(plane, grid, include=None, plotter=None):
plotter.plot_array(
array=plane.potential_from_grid(grid=grid),
mask=include.mask_from_grid(grid=grid),
critical_curves=include.critical_curves_from_obj(obj=plane),
light_profile_centres=include.light_profile_centres_of_galaxies_from_obj(
obj=plane
),
mass_profile_centres=include.mass_profile_centres_of_galaxies_from_obj(
obj=plane
),
include_origin=include.origin,
)
@lensing_plotters.set_include_and_plotter
@plotters.set_labels
def deflections_y(plane, grid, include=None, plotter=None):
deflections = plane.deflections_from_grid(grid=grid)
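    # Column 0 of the deflection stack holds the y component; map it back to
    # a 1D array aligned with the grid.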
deflections_y = grid.mapping.array_stored_1d_from_sub_array_1d(
sub_array_1d=deflections[:, 0]
)
plotter.plot_array(
array=deflections_y,
mask=include.mask_from_grid(grid=grid),
critical_curves=include.critical_curves_from_obj(obj=plane),
light_profile_centres=include.light_profile_centres_of_galaxies_from_obj(
obj=plane
),
mass_profile_centres=include.mass_profile_centres_of_galaxies_from_obj(
obj=plane
),
include_origin=include.origin,
)
@lensing_plotters.set_include_and_plotter
@plotters.set_labels
def deflections_x(plane, grid, include=None, plotter=None):
deflections = plane.deflections_from_grid(grid=grid)
deflections_x = grid.mapping.array_stored_1d_from_sub_array_1d(
sub_array_1d=deflections[:, 1]
)
plotter.plot_array(
array=deflections_x,
mask=include.mask_from_grid(grid=grid),
critical_curves=include.critical_curves_from_obj(obj=plane),
light_profile_centres=include.light_profile_centres_of_galaxies_from_obj(
obj=plane
),
mass_profile_centres=include.mass_profile_centres_of_galaxies_from_obj(
obj=plane
),
include_origin=include.origin,
)
@lensing_plotters.set_include_and_plotter
@plotters.set_labels
def magnification(plane, grid, include=None, plotter=None):
plotter.plot_array(
array=plane.magnification_from_grid(grid=grid),
mask=include.mask_from_grid(grid=grid),
critical_curves=include.critical_curves_from_obj(obj=plane),
light_profile_centres=include.light_profile_centres_of_galaxies_from_obj(
obj=plane
),
mass_profile_centres=include.mass_profile_centres_of_galaxies_from_obj(
obj=plane
),
include_origin=include.origin,
)
@lensing_plotters.set_include_and_sub_plotter
@plotters.set_labels
def image_and_source_plane_subplot(
image_plane,
source_plane,
grid,
indexes=None,
positions=None,
axis_limits=None,
include=None,
sub_plotter=None,
):
number_subplots = 2
sub_plotter.open_subplot_figure(number_subplots=number_subplots)
sub_plotter.setup_subplot(number_subplots=number_subplots, subplot_index=1)
plane_grid(
plane=image_plane,
grid=grid,
indexes=indexes,
axis_limits=axis_limits,
positions=positions,
critical_curves=include.critical_curves_from_obj(obj=image_plane),
include=include,
plotter=sub_plotter,
)
source_plane_grid = image_plane.traced_grid_from_grid(grid=grid)
sub_plotter.setup_subplot(number_subplots=number_subplots, subplot_index=2)
plane_grid(
plane=source_plane,
grid=source_plane_grid,
indexes=indexes,
axis_limits=axis_limits,
positions=positions,
caustics=include.caustics_from_obj(obj=image_plane),
include=include,
plotter=sub_plotter,
)
sub_plotter.output.subplot_to_figure()
sub_plotter.figure.close()
@lensing_plotters.set_include_and_plotter
@plotters.set_labels
def plane_grid(
plane,
grid,
indexes=None,
axis_limits=None,
positions=None,
critical_curves=None,
caustics=None,
include=None,
plotter=None,
):
plotter.plot_grid(
grid=grid,
positions=positions,
axis_limits=axis_limits,
indexes=indexes,
critical_curves=critical_curves,
caustics=caustics,
light_profile_centres=include.light_profile_centres_of_galaxies_from_obj(
obj=plane
),
mass_profile_centres=include.mass_profile_centres_of_galaxies_from_obj(
obj=plane
),
include_origin=include.origin,
include_border=include.border,
)
@lensing_plotters.set_include_and_plotter
@plotters.set_labels
def contribution_map(plane, mask=None, positions=None, include=None, plotter=None):
plotter.plot_array(
array=plane.contribution_map,
mask=mask,
positions=positions,
light_profile_centres=include.light_profile_centres_of_galaxies_from_obj(
obj=plane
),
mass_profile_centres=include.mass_profile_centres_of_galaxies_from_obj(
obj=plane
),
critical_curves=include.critical_curves_from_obj(obj=plane),
include_origin=include.origin,
include_border=include.border,
)
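# Illustrative usage (a sketch, not part of the original module): `plane` and
# `grid` are assumed to come from PyAutoLens's usual setup (e.g. a Plane of
# galaxies and a masked grid built elsewhere).
#
#   convergence(plane=plane, grid=grid)
#   deflections_y(plane=plane, grid=grid)
#   image_and_source_plane_subplot(
#       image_plane=image_plane, source_plane=source_plane, grid=grid
#   )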
| 29.81405 | 88 | 0.712405 | 876 | 7,215 | 5.455479 | 0.070776 | 0.105461 | 0.056497 | 0.078468 | 0.835321 | 0.788449 | 0.788449 | 0.788449 | 0.788449 | 0.748692 | 0 | 0.001928 | 0.209148 | 7,215 | 241 | 89 | 29.937759 | 0.835612 | 0 | 0 | 0.69802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049505 | false | 0 | 0.009901 | 0 | 0.059406 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 |