hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
e0604e5713f1319e78797d5008443063f5edc27c
30,584
py
Python
cinder/tests/unit/volume/drivers/netapp/test_utils.py
overcastcloud/cinder
ad977d456c5d50e992eee95ea40f4e3dd21981dc
[ "Apache-2.0" ]
null
null
null
cinder/tests/unit/volume/drivers/netapp/test_utils.py
overcastcloud/cinder
ad977d456c5d50e992eee95ea40f4e3dd21981dc
[ "Apache-2.0" ]
null
null
null
cinder/tests/unit/volume/drivers/netapp/test_utils.py
overcastcloud/cinder
ad977d456c5d50e992eee95ea40f4e3dd21981dc
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Tom Barron. All rights reserved. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Mock unit tests for the NetApp driver utility module """ import copy import platform import mock from oslo_concurrency import processutils as putils from cinder import context from cinder import exception from cinder import test import cinder.tests.unit.volume.drivers.netapp.fakes as fake from cinder import version from cinder.volume.drivers.netapp import utils as na_utils from cinder.volume import qos_specs from cinder.volume import volume_types class NetAppDriverUtilsTestCase(test.TestCase): @mock.patch.object(na_utils, 'LOG', mock.Mock()) def test_validate_instantiation_proxy(self): kwargs = {'netapp_mode': 'proxy'} na_utils.validate_instantiation(**kwargs) self.assertEqual(na_utils.LOG.warning.call_count, 0) @mock.patch.object(na_utils, 'LOG', mock.Mock()) def test_validate_instantiation_no_proxy(self): kwargs = {'netapp_mode': 'asdf'} na_utils.validate_instantiation(**kwargs) self.assertEqual(na_utils.LOG.warning.call_count, 1) def test_check_flags(self): class TestClass(object): pass required_flags = ['flag1', 'flag2'] configuration = TestClass() setattr(configuration, 'flag1', 'value1') setattr(configuration, 'flag3', 'value3') self.assertRaises(exception.InvalidInput, na_utils.check_flags, required_flags, configuration) setattr(configuration, 'flag2', 'value2') 
self.assertIsNone(na_utils.check_flags(required_flags, configuration)) def test_to_bool(self): self.assertTrue(na_utils.to_bool(True)) self.assertTrue(na_utils.to_bool('true')) self.assertTrue(na_utils.to_bool('yes')) self.assertTrue(na_utils.to_bool('y')) self.assertTrue(na_utils.to_bool(1)) self.assertTrue(na_utils.to_bool('1')) self.assertFalse(na_utils.to_bool(False)) self.assertFalse(na_utils.to_bool('false')) self.assertFalse(na_utils.to_bool('asdf')) self.assertFalse(na_utils.to_bool('no')) self.assertFalse(na_utils.to_bool('n')) self.assertFalse(na_utils.to_bool(0)) self.assertFalse(na_utils.to_bool('0')) self.assertFalse(na_utils.to_bool(2)) self.assertFalse(na_utils.to_bool('2')) def test_set_safe_attr(self): fake_object = mock.Mock() fake_object.fake_attr = None # test initial checks self.assertFalse(na_utils.set_safe_attr(None, fake_object, None)) self.assertFalse(na_utils.set_safe_attr(fake_object, None, None)) self.assertFalse(na_utils.set_safe_attr(fake_object, 'fake_attr', None)) # test value isn't changed if it shouldn't be and retval is False fake_object.fake_attr = 'fake_value' self.assertFalse(na_utils.set_safe_attr(fake_object, 'fake_attr', 'fake_value')) self.assertEqual(fake_object.fake_attr, 'fake_value') # test value is changed if it should be and retval is True self.assertTrue(na_utils.set_safe_attr(fake_object, 'fake_attr', 'new_fake_value')) self.assertEqual(fake_object.fake_attr, 'new_fake_value') def test_round_down(self): self.assertAlmostEqual(na_utils.round_down(5.567, '0.00'), 5.56) self.assertAlmostEqual(na_utils.round_down(5.567, '0.0'), 5.5) self.assertAlmostEqual(na_utils.round_down(5.567, '0'), 5) self.assertAlmostEqual(na_utils.round_down(0, '0.00'), 0) self.assertAlmostEqual(na_utils.round_down(-5.567, '0.00'), -5.56) self.assertAlmostEqual(na_utils.round_down(-5.567, '0.0'), -5.5) self.assertAlmostEqual(na_utils.round_down(-5.567, '0'), -5) def test_iscsi_connection_properties(self): actual_properties = 
na_utils.get_iscsi_connection_properties( fake.ISCSI_FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME, fake.ISCSI_FAKE_IQN, fake.ISCSI_FAKE_ADDRESS, fake.ISCSI_FAKE_PORT) actual_properties_mapped = actual_properties['data'] self.assertDictEqual(actual_properties_mapped, fake.FC_ISCSI_TARGET_INFO_DICT) def test_iscsi_connection_lun_id_type_str(self): FAKE_LUN_ID = '1' actual_properties = na_utils.get_iscsi_connection_properties( FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME, fake.ISCSI_FAKE_IQN, fake.ISCSI_FAKE_ADDRESS, fake.ISCSI_FAKE_PORT) actual_properties_mapped = actual_properties['data'] self.assertIs(type(actual_properties_mapped['target_lun']), int) def test_iscsi_connection_lun_id_type_dict(self): FAKE_LUN_ID = {'id': 'fake_id'} self.assertRaises(TypeError, na_utils.get_iscsi_connection_properties, FAKE_LUN_ID, fake.ISCSI_FAKE_VOLUME, fake.ISCSI_FAKE_IQN, fake.ISCSI_FAKE_ADDRESS, fake.ISCSI_FAKE_PORT) def test_get_volume_extra_specs(self): fake_extra_specs = {'fake_key': 'fake_value'} fake_volume_type = {'extra_specs': fake_extra_specs} fake_volume = {'volume_type_id': 'fake_volume_type_id'} self.mock_object(context, 'get_admin_context') self.mock_object(volume_types, 'get_volume_type', mock.Mock( return_value=fake_volume_type)) self.mock_object(na_utils, 'log_extra_spec_warnings') result = na_utils.get_volume_extra_specs(fake_volume) self.assertEqual(fake_extra_specs, result) def test_get_volume_extra_specs_no_type_id(self): fake_volume = {} self.mock_object(context, 'get_admin_context') self.mock_object(volume_types, 'get_volume_type') self.mock_object(na_utils, 'log_extra_spec_warnings') result = na_utils.get_volume_extra_specs(fake_volume) self.assertEqual({}, result) def test_get_volume_extra_specs_no_volume_type(self): fake_volume = {'volume_type_id': 'fake_volume_type_id'} self.mock_object(context, 'get_admin_context') self.mock_object(volume_types, 'get_volume_type', mock.Mock( return_value=None)) self.mock_object(na_utils, 'log_extra_spec_warnings') result = 
na_utils.get_volume_extra_specs(fake_volume) self.assertEqual({}, result) def test_log_extra_spec_warnings_obsolete_specs(self): mock_log = self.mock_object(na_utils.LOG, 'warning') na_utils.log_extra_spec_warnings({'netapp:raid_type': 'raid4'}) self.assertEqual(1, mock_log.call_count) def test_log_extra_spec_warnings_deprecated_specs(self): mock_log = self.mock_object(na_utils.LOG, 'warning') na_utils.log_extra_spec_warnings({'netapp_thick_provisioned': 'true'}) self.assertEqual(1, mock_log.call_count) def test_validate_qos_spec_none(self): qos_spec = None # Just return without raising an exception. na_utils.validate_qos_spec(qos_spec) def test_validate_qos_spec_keys_weirdly_cased(self): qos_spec = {'mAxIopS': 33000} # Just return without raising an exception. na_utils.validate_qos_spec(qos_spec) def test_validate_qos_spec_bad_key(self): qos_spec = {'maxFlops': 33000} self.assertRaises(exception.Invalid, na_utils.validate_qos_spec, qos_spec) def test_validate_qos_spec_bad_key_combination(self): qos_spec = {'maxIOPS': 33000, 'maxBPS': 10000000} self.assertRaises(exception.Invalid, na_utils.validate_qos_spec, qos_spec) def test_map_qos_spec_none(self): qos_spec = None result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) self.assertEqual(None, result) def test_map_qos_spec_maxiops(self): qos_spec = {'maxIOPs': 33000} mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') mock_get_name.return_value = 'fake_qos_policy' expected = { 'policy_name': 'fake_qos_policy', 'max_throughput': '33000iops', } result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) self.assertEqual(expected, result) def test_map_qos_spec_maxbps(self): qos_spec = {'maxBPS': 1000000} mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') mock_get_name.return_value = 'fake_qos_policy' expected = { 'policy_name': 'fake_qos_policy', 'max_throughput': '1000000B/s', } result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) self.assertEqual(expected, result) def 
test_map_qos_spec_no_key_present(self): qos_spec = {} mock_get_name = self.mock_object(na_utils, 'get_qos_policy_group_name') mock_get_name.return_value = 'fake_qos_policy' expected = { 'policy_name': 'fake_qos_policy', 'max_throughput': None, } result = na_utils.map_qos_spec(qos_spec, fake.VOLUME) self.assertEqual(expected, result) def test_map_dict_to_lower(self): original = {'UPperKey': 'Value'} expected = {'upperkey': 'Value'} result = na_utils.map_dict_to_lower(original) self.assertEqual(expected, result) def test_get_qos_policy_group_name(self): expected = 'openstack-%s' % fake.VOLUME_ID result = na_utils.get_qos_policy_group_name(fake.VOLUME) self.assertEqual(expected, result) def test_get_qos_policy_group_name_no_id(self): volume = copy.deepcopy(fake.VOLUME) del(volume['id']) result = na_utils.get_qos_policy_group_name(volume) self.assertEqual(None, result) def test_get_qos_policy_group_name_from_info(self): expected = 'openstack-%s' % fake.VOLUME_ID result = na_utils.get_qos_policy_group_name_from_info( fake.QOS_POLICY_GROUP_INFO) self.assertEqual(expected, result) def test_get_qos_policy_group_name_from_info_no_info(self): result = na_utils.get_qos_policy_group_name_from_info(None) self.assertEqual(None, result) def test_get_qos_policy_group_name_from_legacy_info(self): expected = fake.QOS_POLICY_GROUP_NAME result = na_utils.get_qos_policy_group_name_from_info( fake.LEGACY_QOS_POLICY_GROUP_INFO) self.assertEqual(expected, result) def test_get_qos_policy_group_name_from_spec_info(self): expected = 'openstack-%s' % fake.VOLUME_ID result = na_utils.get_qos_policy_group_name_from_info( fake.QOS_POLICY_GROUP_INFO) self.assertEqual(expected, result) def test_get_qos_policy_group_name_from_none_qos_info(self): expected = None result = na_utils.get_qos_policy_group_name_from_info( fake.QOS_POLICY_GROUP_INFO_NONE) self.assertEqual(expected, result) def test_get_valid_qos_policy_group_info_exception_path(self): mock_get_volume_type = self.mock_object(na_utils, 
'get_volume_type_from_volume') mock_get_volume_type.side_effect = exception.VolumeTypeNotFound expected = fake.QOS_POLICY_GROUP_INFO_NONE result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) self.assertEqual(expected, result) def test_get_valid_qos_policy_group_info_volume_type_none(self): mock_get_volume_type = self.mock_object(na_utils, 'get_volume_type_from_volume') mock_get_volume_type.return_value = None expected = fake.QOS_POLICY_GROUP_INFO_NONE result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) self.assertEqual(expected, result) def test_get_valid_qos_policy_group_info_no_info(self): mock_get_volume_type = self.mock_object(na_utils, 'get_volume_type_from_volume') mock_get_volume_type.return_value = fake.VOLUME_TYPE mock_get_legacy_qos_policy = self.mock_object(na_utils, 'get_legacy_qos_policy') mock_get_legacy_qos_policy.return_value = None mock_get_valid_qos_spec_from_volume_type = self.mock_object( na_utils, 'get_valid_backend_qos_spec_from_volume_type') mock_get_valid_qos_spec_from_volume_type.return_value = None self.mock_object(na_utils, 'check_for_invalid_qos_spec_combination') expected = fake.QOS_POLICY_GROUP_INFO_NONE result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) self.assertEqual(expected, result) def test_get_valid_legacy_qos_policy_group_info(self): mock_get_volume_type = self.mock_object(na_utils, 'get_volume_type_from_volume') mock_get_volume_type.return_value = fake.VOLUME_TYPE mock_get_legacy_qos_policy = self.mock_object(na_utils, 'get_legacy_qos_policy') mock_get_legacy_qos_policy.return_value = fake.LEGACY_QOS mock_get_valid_qos_spec_from_volume_type = self.mock_object( na_utils, 'get_valid_backend_qos_spec_from_volume_type') mock_get_valid_qos_spec_from_volume_type.return_value = None self.mock_object(na_utils, 'check_for_invalid_qos_spec_combination') result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) self.assertEqual(fake.LEGACY_QOS_POLICY_GROUP_INFO, result) def 
test_get_valid_spec_qos_policy_group_info(self): mock_get_volume_type = self.mock_object(na_utils, 'get_volume_type_from_volume') mock_get_volume_type.return_value = fake.VOLUME_TYPE mock_get_legacy_qos_policy = self.mock_object(na_utils, 'get_legacy_qos_policy') mock_get_legacy_qos_policy.return_value = None mock_get_valid_qos_spec_from_volume_type = self.mock_object( na_utils, 'get_valid_backend_qos_spec_from_volume_type') mock_get_valid_qos_spec_from_volume_type.return_value =\ fake.QOS_POLICY_GROUP_SPEC self.mock_object(na_utils, 'check_for_invalid_qos_spec_combination') result = na_utils.get_valid_qos_policy_group_info(fake.VOLUME) self.assertEqual(fake.QOS_POLICY_GROUP_INFO, result) def test_get_valid_backend_qos_spec_from_volume_type_no_spec(self): mock_get_spec = self.mock_object( na_utils, 'get_backend_qos_spec_from_volume_type') mock_get_spec.return_value = None mock_validate = self.mock_object(na_utils, 'validate_qos_spec') result = na_utils.get_valid_backend_qos_spec_from_volume_type( fake.VOLUME, fake.VOLUME_TYPE) self.assertEqual(None, result) self.assertEqual(0, mock_validate.call_count) def test_get_valid_backend_qos_spec_from_volume_type(self): mock_get_spec = self.mock_object( na_utils, 'get_backend_qos_spec_from_volume_type') mock_get_spec.return_value = fake.QOS_SPEC mock_validate = self.mock_object(na_utils, 'validate_qos_spec') result = na_utils.get_valid_backend_qos_spec_from_volume_type( fake.VOLUME, fake.VOLUME_TYPE) self.assertEqual(fake.QOS_POLICY_GROUP_SPEC, result) self.assertEqual(1, mock_validate.call_count) def test_get_backend_qos_spec_from_volume_type_no_qos_specs_id(self): volume_type = copy.deepcopy(fake.VOLUME_TYPE) del(volume_type['qos_specs_id']) mock_get_context = self.mock_object(context, 'get_admin_context') result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) self.assertEqual(None, result) self.assertEqual(0, mock_get_context.call_count) def test_get_backend_qos_spec_from_volume_type_no_qos_spec(self): 
volume_type = fake.VOLUME_TYPE self.mock_object(context, 'get_admin_context') mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs') mock_get_specs.return_value = None result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) self.assertEqual(None, result) def test_get_backend_qos_spec_from_volume_type_with_frontend_spec(self): volume_type = fake.VOLUME_TYPE self.mock_object(context, 'get_admin_context') mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs') mock_get_specs.return_value = fake.OUTER_FRONTEND_QOS_SPEC result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) self.assertEqual(None, result) def test_get_backend_qos_spec_from_volume_type_with_backend_spec(self): volume_type = fake.VOLUME_TYPE self.mock_object(context, 'get_admin_context') mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs') mock_get_specs.return_value = fake.OUTER_BACKEND_QOS_SPEC result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) self.assertEqual(fake.QOS_SPEC, result) def test_get_backend_qos_spec_from_volume_type_with_both_spec(self): volume_type = fake.VOLUME_TYPE self.mock_object(context, 'get_admin_context') mock_get_specs = self.mock_object(qos_specs, 'get_qos_specs') mock_get_specs.return_value = fake.OUTER_BOTH_QOS_SPEC result = na_utils.get_backend_qos_spec_from_volume_type(volume_type) self.assertEqual(fake.QOS_SPEC, result) def test_check_for_invalid_qos_spec_combination(self): self.assertRaises(exception.Invalid, na_utils.check_for_invalid_qos_spec_combination, fake.INVALID_QOS_POLICY_GROUP_INFO, fake.VOLUME_TYPE) def test_get_legacy_qos_policy(self): extra_specs = fake.LEGACY_EXTRA_SPECS expected = {'policy_name': fake.QOS_POLICY_GROUP_NAME} result = na_utils.get_legacy_qos_policy(extra_specs) self.assertEqual(expected, result) def test_get_legacy_qos_policy_no_policy_name(self): extra_specs = fake.EXTRA_SPECS result = na_utils.get_legacy_qos_policy(extra_specs) self.assertEqual(None, result) class 
OpenStackInfoTestCase(test.TestCase): UNKNOWN_VERSION = 'unknown version' UNKNOWN_RELEASE = 'unknown release' UNKNOWN_VENDOR = 'unknown vendor' UNKNOWN_PLATFORM = 'unknown platform' VERSION_STRING_RET_VAL = 'fake_version_1' RELEASE_STRING_RET_VAL = 'fake_release_1' PLATFORM_RET_VAL = 'fake_platform_1' VERSION_INFO_VERSION = 'fake_version_2' VERSION_INFO_RELEASE = 'fake_release_2' RPM_INFO_VERSION = 'fake_version_3' RPM_INFO_RELEASE = 'fake_release_3' RPM_INFO_VENDOR = 'fake vendor 3' PUTILS_RPM_RET_VAL = ('fake_version_3 fake_release_3 fake vendor 3', '') NO_PKG_FOUND = ('', 'whatever') PUTILS_DPKG_RET_VAL = ('epoch:upstream_version-debian_revision', '') DEB_RLS = 'upstream_version-debian_revision' DEB_VENDOR = 'debian_revision' def setUp(self): super(OpenStackInfoTestCase, self).setUp() def test_openstack_info_init(self): info = na_utils.OpenStackInfo() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'version_string', mock.Mock(return_value=VERSION_STRING_RET_VAL)) def test_update_version_from_version_string(self): info = na_utils.OpenStackInfo() info._update_version_from_version_string() self.assertEqual(self.VERSION_STRING_RET_VAL, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'version_string', mock.Mock(side_effect=Exception)) def test_xcption_in_update_version_from_version_string(self): info = na_utils.OpenStackInfo() info._update_version_from_version_string() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) 
@mock.patch.object(version.version_info, 'release_string', mock.Mock(return_value=RELEASE_STRING_RET_VAL)) def test_update_release_from_release_string(self): info = na_utils.OpenStackInfo() info._update_release_from_release_string() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.RELEASE_STRING_RET_VAL, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(version.version_info, 'release_string', mock.Mock(side_effect=Exception)) def test_xcption_in_update_release_from_release_string(self): info = na_utils.OpenStackInfo() info._update_release_from_release_string() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(platform, 'platform', mock.Mock(return_value=PLATFORM_RET_VAL)) def test_update_platform(self): info = na_utils.OpenStackInfo() info._update_platform() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.PLATFORM_RET_VAL, info._platform) @mock.patch.object(platform, 'platform', mock.Mock(side_effect=Exception)) def test_xcption_in_update_platform(self): info = na_utils.OpenStackInfo() info._update_platform() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', mock.Mock(return_value=VERSION_INFO_VERSION)) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', mock.Mock(return_value=VERSION_INFO_RELEASE)) def test_update_info_from_version_info(self): info = na_utils.OpenStackInfo() 
info._update_info_from_version_info() self.assertEqual(self.VERSION_INFO_VERSION, info._version) self.assertEqual(self.VERSION_INFO_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', mock.Mock(return_value='')) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', mock.Mock(return_value=None)) def test_no_info_from_version_info(self): info = na_utils.OpenStackInfo() info._update_info_from_version_info() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_version', mock.Mock(return_value=VERSION_INFO_VERSION)) @mock.patch.object(na_utils.OpenStackInfo, '_get_version_info_release', mock.Mock(side_effect=Exception)) def test_xcption_in_info_from_version_info(self): info = na_utils.OpenStackInfo() info._update_info_from_version_info() self.assertEqual(self.VERSION_INFO_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) @mock.patch.object(putils, 'execute', mock.Mock(return_value=PUTILS_RPM_RET_VAL)) def test_update_info_from_rpm(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_rpm() self.assertEqual(self.RPM_INFO_VERSION, info._version) self.assertEqual(self.RPM_INFO_RELEASE, info._release) self.assertEqual(self.RPM_INFO_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertTrue(found_package) @mock.patch.object(putils, 'execute', mock.Mock(return_value=NO_PKG_FOUND)) def test_update_info_from_rpm_no_pkg_found(self): info = na_utils.OpenStackInfo() found_package = 
info._update_info_from_rpm() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(putils, 'execute', mock.Mock(side_effect=Exception)) def test_xcption_in_update_info_from_rpm(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_rpm() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(putils, 'execute', mock.Mock(return_value=PUTILS_DPKG_RET_VAL)) def test_update_info_from_dpkg(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_dpkg() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.DEB_RLS, info._release) self.assertEqual(self.DEB_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertTrue(found_package) @mock.patch.object(putils, 'execute', mock.Mock(return_value=NO_PKG_FOUND)) def test_update_info_from_dpkg_no_pkg_found(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_dpkg() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(putils, 'execute', mock.Mock(side_effect=Exception)) def test_xcption_in_update_info_from_dpkg(self): info = na_utils.OpenStackInfo() found_package = info._update_info_from_dpkg() self.assertEqual(self.UNKNOWN_VERSION, info._version) self.assertEqual(self.UNKNOWN_RELEASE, info._release) self.assertEqual(self.UNKNOWN_VENDOR, info._vendor) 
self.assertEqual(self.UNKNOWN_PLATFORM, info._platform) self.assertFalse(found_package) @mock.patch.object(na_utils.OpenStackInfo, '_update_version_from_version_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_release_from_release_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_platform', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_version_info', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_rpm', mock.Mock(return_value=True)) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_dpkg') def test_update_openstack_info_rpm_pkg_found(self, mock_updt_from_dpkg): info = na_utils.OpenStackInfo() info._update_openstack_info() self.assertFalse(mock_updt_from_dpkg.called) @mock.patch.object(na_utils.OpenStackInfo, '_update_version_from_version_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_release_from_release_string', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_platform', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_version_info', mock.Mock()) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_rpm', mock.Mock(return_value=False)) @mock.patch.object(na_utils.OpenStackInfo, '_update_info_from_dpkg') def test_update_openstack_info_rpm_pkg_not_found(self, mock_updt_from_dpkg): info = na_utils.OpenStackInfo() info._update_openstack_info() self.assertTrue(mock_updt_from_dpkg.called)
41.441734
79
0.695298
3,908
30,584
4.999744
0.068833
0.049081
0.062235
0.070526
0.840524
0.815702
0.792006
0.76314
0.750908
0.730232
0
0.006108
0.21848
30,584
737
80
41.497965
0.811355
0.031356
0
0.567468
0
0
0.085242
0.040172
0
0
0
0
0.268022
1
0.118299
false
0.001848
0.022181
0
0.177449
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
0ef056bcb1e47dda4f53a1d1ff6986ca19753e1c
117
py
Python
yaml/__init__.py
mattiasljungstrom/fips
8775e299f710ae5b977d49dc0672b607f2a10378
[ "MIT" ]
429
2015-01-06T18:44:20.000Z
2022-03-19T22:22:11.000Z
yaml/__init__.py
mattiasljungstrom/fips
8775e299f710ae5b977d49dc0672b607f2a10378
[ "MIT" ]
254
2015-01-01T18:11:57.000Z
2022-03-22T09:55:51.000Z
yaml/__init__.py
mattiasljungstrom/fips
8775e299f710ae5b977d49dc0672b607f2a10378
[ "MIT" ]
102
2015-01-17T11:41:16.000Z
2022-02-24T23:47:30.000Z
from sys import version_info if version_info[0] < 3: from yaml.yaml2 import * else: from yaml.yaml3 import *
19.5
28
0.709402
19
117
4.263158
0.631579
0.271605
0
0
0
0
0
0
0
0
0
0.043478
0.213675
117
5
29
23.4
0.836957
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
0.6
0
0.6
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
1641f5346dbf0ff5dd80ac31c99cb4142226029d
52,090
py
Python
tests.py
abmyii/pdf2image
d5094e3b9ea2ff62483093e3415dee1044fd9974
[ "MIT" ]
null
null
null
tests.py
abmyii/pdf2image
d5094e3b9ea2ff62483093e3415dee1044fd9974
[ "MIT" ]
null
null
null
tests.py
abmyii/pdf2image
d5094e3b9ea2ff62483093e3415dee1044fd9974
[ "MIT" ]
null
null
null
import os import sys import errno import pathlib import tempfile import unittest import time import shutil import subprocess from subprocess import Popen, PIPE from tempfile import TemporaryDirectory from memory_profiler import profile as profile_memory sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) from pdf2image import convert_from_bytes, convert_from_path from pdf2image.exceptions import ( PDFInfoNotInstalledError, PDFPageCountError, PDFSyntaxError, ) from functools import wraps PROFILE_MEMORY = False try: subprocess.call( ["pdfinfo", "-h"], stdout=open(os.devnull, "w"), stderr=open(os.devnull, "w") ) POPPLER_INSTALLED = True except OSError as e: if e.errno == errno.ENOENT: POPPLER_INSTALLED = False def profile(f): if PROFILE_MEMORY: @wraps(f) @profile_memory def wrapped(*args, **kwargs): r = f(*args, **kwargs) return r return wrapped else: @wraps(f) def wrapped(*args, **kwargs): r = f(*args, **kwargs) return r return wrapped def get_poppler_path(): return pathlib.Path( Popen(["which", "pdftoppm"], stdout=PIPE).communicate()[0].strip().decode() ).parent class PDFConversionMethods(unittest.TestCase): @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes(self): start_time = time.time() with open("./tests/test.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes(pdf_file.read()) self.assertTrue(len(images_from_bytes) == 1) print("test_conversion_from_bytes: {} sec".format(time.time() - start_time)) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path(self): start_time = time.time() images_from_path = convert_from_path("./tests/test.pdf") self.assertTrue(len(images_from_path) == 1) print("test_conversion_from_path: {} sec".format(time.time() - start_time)) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_using_dir(self): start_time = time.time() with 
TemporaryDirectory() as path: with open("./tests/test.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes( pdf_file.read(), output_folder=path ) self.assertTrue(len(images_from_bytes) == 1) [im.close() for im in images_from_bytes] print( "test_conversion_from_bytes_using_dir: {} sec".format( time.time() - start_time ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_using_dir(self): start_time = time.time() with TemporaryDirectory() as path: images_from_path = convert_from_path("./tests/test.pdf", output_folder=path) self.assertTrue(len(images_from_path) == 1) [im.close() for im in images_from_path] print( "test_conversion_from_path_using_dir: {} sec".format( time.time() - start_time ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_14(self): start_time = time.time() with open("./tests/test_14.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes(pdf_file.read()) self.assertTrue(len(images_from_bytes) == 14) print( "test_conversion_from_bytes_14: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_14(self): start_time = time.time() images_from_path = convert_from_path("./tests/test_14.pdf") self.assertTrue(len(images_from_path) == 14) print( "test_conversion_from_path_14: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_using_dir_14(self): start_time = time.time() with TemporaryDirectory() as path: with open("./tests/test_14.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes( pdf_file.read(), output_folder=path ) self.assertTrue(len(images_from_bytes) == 14) [im.close() for im in images_from_bytes] print( "test_conversion_from_bytes_using_dir_14: {} sec".format( (time.time() - start_time) 
/ 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_using_dir_14(self): start_time = time.time() with TemporaryDirectory() as path: images_from_path = convert_from_path( "./tests/test_14.pdf", output_folder=path ) self.assertTrue(len(images_from_path) == 14) [im.close() for im in images_from_path] print( "test_conversion_from_path_using_dir_14: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") @unittest.skipIf( "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", "Skipping this test on Travis CI.", ) def test_conversion_from_bytes_241(self): # pragma: no cover start_time = time.time() with open("./tests/test_241.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes(pdf_file.read()) self.assertTrue(len(images_from_bytes) == 241) print( "test_conversion_from_bytes_241: {} sec".format( (time.time() - start_time) / 241.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") @unittest.skipIf( "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", "Skipping this test on Travis CI.", ) def test_conversion_from_path_241(self): # pragma: no cover start_time = time.time() images_from_path = convert_from_path("./tests/test_241.pdf") self.assertTrue(len(images_from_path) == 241) print( "test_conversion_from_path_241: {} sec".format( (time.time() - start_time) / 241.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") @unittest.skipIf( "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", "Skipping this test on Travis CI.", ) def test_conversion_from_bytes_using_dir_241(self): # pragma: no cover start_time = time.time() with TemporaryDirectory() as path: with open("./tests/test_241.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes( pdf_file.read(), output_folder=path ) self.assertTrue(len(images_from_bytes) == 241) [im.close() for im 
in images_from_bytes] print( "test_conversion_from_bytes_using_dir_241: {} sec".format( (time.time() - start_time) / 241.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") @unittest.skipIf( "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", "Skipping this test on Travis CI.", ) def test_conversion_from_path_using_dir_241(self): # pragma: no cover start_time = time.time() with TemporaryDirectory() as path: images_from_path = convert_from_path( "./tests/test_241.pdf", output_folder=path ) self.assertTrue(len(images_from_path) == 241) [im.close() for im in images_from_path] print( "test_conversion_from_path_using_dir_241: {} sec".format( (time.time() - start_time) / 241.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_empty_if_not_pdf(self): start_time = time.time() with self.assertRaises(Exception): convert_from_path("./tests/test.jpg") print("test_empty_if_not_pdf: {} sec".format(time.time() - start_time)) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_empty_if_file_not_found(self): start_time = time.time() with self.assertRaises(Exception): convert_from_path("./tests/totally_a_real_file_in_folder.xyz") print("test_empty_if_file_not_found: {} sec".format(time.time() - start_time)) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_empty_if_corrupted_pdf(self): start_time = time.time() with self.assertRaises(Exception): convert_from_path("./tests/test_corrupted.pdf") print("test_empty_if_corrupted_pdf: {} sec".format(time.time() - start_time)) ## Test first page @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_14_first_page_12(self): start_time = time.time() with open("./tests/test_14.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes(pdf_file.read(), first_page=12) self.assertTrue(len(images_from_bytes) == 3) print( 
"test_conversion_from_bytes_14_last_page_12: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_14_first_page_12(self): start_time = time.time() images_from_path = convert_from_path("./tests/test_14.pdf", first_page=12) self.assertTrue(len(images_from_path) == 3) print( "test_conversion_from_path_14_first_page_12: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_using_dir_14_first_page_12(self): start_time = time.time() with TemporaryDirectory() as path: with open("./tests/test_14.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes( pdf_file.read(), output_folder=path, first_page=12 ) self.assertTrue(len(images_from_bytes) == 3) [im.close() for im in images_from_bytes] print( "test_conversion_from_bytes_using_dir_14_first_page_12: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_using_dir_14_first_page_12(self): start_time = time.time() with TemporaryDirectory() as path: images_from_path = convert_from_path( "./tests/test_14.pdf", output_folder=path, first_page=12 ) self.assertTrue(len(images_from_path) == 3) [im.close() for im in images_from_path] print( "test_conversion_from_path_using_dir_14_first_page_12: {} sec".format( (time.time() - start_time) / 14.0 ) ) ## Test last page @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_14_last_page_12(self): start_time = time.time() with open("./tests/test_14.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes(pdf_file.read(), last_page=12) self.assertTrue(len(images_from_bytes) == 12) print( "test_conversion_from_bytes_14_last_page_12: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile 
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_14_last_page_12(self): start_time = time.time() images_from_path = convert_from_path("./tests/test_14.pdf", last_page=12) self.assertTrue(len(images_from_path) == 12) print( "test_conversion_from_path_14_last_page_12: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_using_dir_14_last_page_12(self): start_time = time.time() with TemporaryDirectory() as path: with open("./tests/test_14.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes( pdf_file.read(), output_folder=path, last_page=12 ) self.assertTrue(len(images_from_bytes) == 12) [im.close() for im in images_from_bytes] print( "test_conversion_from_bytes_using_dir_14_last_page_12: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_using_dir_14_last_page_12(self): start_time = time.time() with TemporaryDirectory() as path: images_from_path = convert_from_path( "./tests/test_14.pdf", output_folder=path, last_page=12 ) self.assertTrue(len(images_from_path) == 12) [im.close() for im in images_from_path] print( "test_conversion_from_path_using_dir_14_last_page_12: {} sec".format( (time.time() - start_time) / 14.0 ) ) ## Test first and last page @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_14_first_page_2_last_page_12(self): start_time = time.time() with open("./tests/test_14.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes( pdf_file.read(), first_page=2, last_page=12 ) self.assertTrue(len(images_from_bytes) == 11) print( "test_conversion_from_bytes_14_first_page_2_last_page_12: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def 
test_conversion_from_path_14_first_page_2_last_page_12(self): start_time = time.time() images_from_path = convert_from_path( "./tests/test_14.pdf", first_page=2, last_page=12 ) self.assertTrue(len(images_from_path) == 11) print( "test_conversion_from_path_14_first_page_2_last_page_12: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_using_dir_14_first_page_2_last_page_12(self): start_time = time.time() with TemporaryDirectory() as path: with open("./tests/test_14.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes( pdf_file.read(), output_folder=path, first_page=2, last_page=12 ) self.assertTrue(len(images_from_bytes) == 11) [im.close() for im in images_from_bytes] print( "test_conversion_from_bytes_using_dir_14_first_page_2_last_page_12: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_using_dir_14_first_page_2_last_page_12(self): start_time = time.time() with TemporaryDirectory() as path: images_from_path = convert_from_path( "./tests/test_14.pdf", output_folder=path, first_page=2, last_page=12 ) self.assertTrue(len(images_from_path) == 11) [im.close() for im in images_from_path] print( "test_conversion_from_path_using_dir_14_first_page_2_last_page_12: {} sec".format( (time.time() - start_time) / 14.0 ) ) ## Test output as jpeg @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_to_jpeg_from_bytes(self): start_time = time.time() with open("./tests/test.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes(pdf_file.read(), fmt="jpg") self.assertTrue(images_from_bytes[0].format == "JPEG") print( "test_conversion_to_jpeg_from_bytes_14: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def 
test_conversion_to_jpeg_from_path_using_dir(self): start_time = time.time() with TemporaryDirectory() as path: images_from_path = convert_from_path( "./tests/test.pdf", output_folder=path, fmt="jpeg" ) self.assertTrue(images_from_path[0].format == "JPEG") [im.close() for im in images_from_path] print( "test_conversion_to_jpeg_from_path_using_dir_14: {} sec".format( (time.time() - start_time) / 14.0 ) ) ## Test output as png @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_to_png_from_bytes(self): start_time = time.time() with open("./tests/test.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes(pdf_file.read(), fmt="png") self.assertTrue(images_from_bytes[0].format == "PNG") print( "test_conversion_to_png_from_bytes_14: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_to_png_from_path_using_dir(self): start_time = time.time() with TemporaryDirectory() as path: images_from_path = convert_from_path( "./tests/test.pdf", output_folder=path, fmt="png" ) self.assertTrue(images_from_path[0].format == "PNG") [im.close() for im in images_from_path] print( "test_conversion_to_png_from_path_using_dir_14: {} sec".format( (time.time() - start_time) / 14.0 ) ) ## Test output with not-empty output_folder @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_non_empty_output_folder(self): start_time = time.time() images_from_path = convert_from_path( "./tests/test.pdf", output_folder="./tests/" ) self.assertTrue(len(images_from_path) == 1) [im.close() for im in images_from_path] [os.remove(im.filename) for im in images_from_path] print( "test_non_empty_output_folder: {} sec".format( (time.time() - start_time) / 14.0 ) ) ## Test format that starts with a dot @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_format_that_starts_with_a_dot(self): 
start_time = time.time() with TemporaryDirectory() as path: with open("./tests/test.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes( pdf_file.read(), output_folder=path, fmt=".jpg" ) self.assertTrue(len(images_from_bytes) == 1) [im.close() for im in images_from_bytes] print( "test_format_that_starts_with_a_dot: {} sec".format( time.time() - start_time ) ) ## Test locked PDF @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_locked_pdf_with_userpw_only(self): start_time = time.time() with TemporaryDirectory() as path: with open("./tests/test_locked_user_only.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes( pdf_file.read(), output_folder=path, fmt=".jpg", userpw="pdf2image" ) self.assertTrue(len(images_from_bytes) == 1) [im.close() for im in images_from_bytes] print( "test_locked_pdf_with_userpw_only: {} sec".format(time.time() - start_time) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_not_locked_pdf(self): start_time = time.time() with TemporaryDirectory() as path: with open("./tests/test.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes( pdf_file.read(), output_folder=path, fmt=".jpg", userpw="pdf2image" ) self.assertTrue(len(images_from_bytes) == 1) [im.close() for im in images_from_bytes] print( "test_locked_pdf_with_userpw_only: {} sec".format(time.time() - start_time) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_locked_pdf_with_ownerpw_only(self): start_time = time.time() with TemporaryDirectory() as path: with open("./tests/test_locked_owner_only.pdf", "rb") as pdf_file: # No need to pass a ownerpw because the absence of userpw means we can read it anyway images_from_bytes = convert_from_bytes( pdf_file.read(), output_folder=path, fmt=".jpg" ) self.assertTrue(len(images_from_bytes) == 1) [im.close() for im in images_from_bytes] print( "test_locked_pdf_with_ownerpw_only: {} 
sec".format(time.time() - start_time) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_locked_pdf_with_ownerpw_and_userpw(self): start_time = time.time() with TemporaryDirectory() as path: with open("./tests/test_locked_both.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes( pdf_file.read(), output_folder=path, fmt=".jpg", userpw="pdf2image" ) self.assertTrue(len(images_from_bytes) == 1) [im.close() for im in images_from_bytes] print( "test_locked_pdf_with_ownerpw_and_userpw: {} sec".format( time.time() - start_time ) ) ## Tests cropbox @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_using_cropbox(self): start_time = time.time() with open("./tests/test.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes(pdf_file.read(), use_cropbox=True) self.assertTrue(len(images_from_bytes) == 1) print( "test_conversion_from_bytes_using_cropbox: {} sec".format( time.time() - start_time ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_using_cropbox(self): start_time = time.time() images_from_path = convert_from_path("./tests/test.pdf", use_cropbox=True) self.assertTrue(len(images_from_path) == 1) print( "test_conversion_from_path_using_cropbox: {} sec".format( time.time() - start_time ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_using_dir_and_cropbox(self): start_time = time.time() with TemporaryDirectory() as path: with open("./tests/test.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes( pdf_file.read(), output_folder=path, use_cropbox=True ) self.assertTrue(len(images_from_bytes) == 1) [im.close() for im in images_from_bytes] print( "test_conversion_from_bytes_using_dir_and_cropbox: {} sec".format( time.time() - start_time ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def 
test_conversion_from_path_using_dir_and_cropbox(self): start_time = time.time() with TemporaryDirectory() as path: images_from_path = convert_from_path( "./tests/test.pdf", output_folder=path, use_cropbox=True ) self.assertTrue(len(images_from_path) == 1) [im.close() for im in images_from_path] print( "test_conversion_from_path_using_dir_and_cropbox: {} sec".format( time.time() - start_time ) ) ## Tests multithreading @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_14_with_4_threads(self): start_time = time.time() with open("./tests/test_14.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes(pdf_file.read(), thread_count=4) self.assertTrue(len(images_from_bytes) == 14) print( "test_conversion_from_bytes_14_with_4_thread: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_14_with_4_threads(self): start_time = time.time() images_from_path = convert_from_path("./tests/test_14.pdf", thread_count=4) self.assertTrue(len(images_from_path) == 14) print( "test_conversion_from_path_14_with_4_thread: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_14_with_15_threads(self): start_time = time.time() with open("./tests/test_14.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes(pdf_file.read(), thread_count=15) self.assertTrue(len(images_from_bytes) == 14) print( "test_conversion_from_bytes_14_with_15_thread: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_14_with_0_threads(self): start_time = time.time() images_from_path = convert_from_path("./tests/test_14.pdf", thread_count=0) self.assertTrue(len(images_from_path) == 14) print( 
"test_conversion_from_path_14_with_4_thread: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_using_dir_14_with_4_threads(self): start_time = time.time() with TemporaryDirectory() as path: with open("./tests/test_14.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes( pdf_file.read(), output_folder=path, thread_count=4 ) self.assertTrue(len(images_from_bytes) == 14) [im.close() for im in images_from_bytes] print( "test_conversion_from_bytes_using_dir_14_with_4_thread: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_using_dir_14_with_4_threads(self): start_time = time.time() with TemporaryDirectory() as path: images_from_path = convert_from_path( "./tests/test_14.pdf", output_folder=path, thread_count=4 ) self.assertTrue(len(images_from_path) == 14) [im.close() for im in images_from_path] print( "test_conversion_from_path_using_dir_14_with_4_thread: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf( "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", "Skipping this test on Travis CI.", ) @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_241_with_4_threads(self): # pragma: no cover start_time = time.time() with open("./tests/test_241.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes(pdf_file.read(), thread_count=4) self.assertTrue(len(images_from_bytes) == 241) print( "test_conversion_from_bytes_241_with_4_thread: {} sec".format( (time.time() - start_time) / 241.0 ) ) @profile @unittest.skipIf( "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", "Skipping this test on Travis CI.", ) @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_241_with_4_threads(self): # pragma: no cover 
start_time = time.time() images_from_path = convert_from_path("./tests/test_241.pdf", thread_count=4) self.assertTrue(len(images_from_path) == 241) print( "test_conversion_from_path_241_with_4_thread: {} sec".format( (time.time() - start_time) / 241.0 ) ) @profile @unittest.skipIf( "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", "Skipping this test on Travis CI.", ) @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_using_dir_241_with_4_threads( self ): # pragma: no cover start_time = time.time() with TemporaryDirectory() as path: with open("./tests/test_241.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes( pdf_file.read(), output_folder=path, thread_count=4 ) self.assertTrue(len(images_from_bytes) == 241) [im.close() for im in images_from_bytes] print( "test_conversion_from_bytes_using_dir_241_with_4_thread: {} sec".format( (time.time() - start_time) / 241.0 ) ) @profile @unittest.skipIf( "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", "Skipping this test on Travis CI.", ) @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_using_dir_241_with_4_threads( self ): # pragma: no cover start_time = time.time() with TemporaryDirectory() as path: images_from_path = convert_from_path( "./tests/test_241.pdf", output_folder=path, thread_count=4 ) self.assertTrue(len(images_from_path) == 241) [im.close() for im in images_from_path] print( "test_conversion_from_path_using_dir_241_with_4_thread: {} sec".format( (time.time() - start_time) / 241.0 ) ) # Testing custom exceptions @unittest.skipIf(POPPLER_INSTALLED, "Poppler is installed, skipping.") def test_pdfinfo_not_installed_throws(self): start_time = time.time() try: images_from_path = convert_from_path("./tests/test_14.pdf") raise Exception("This should not happen") except PDFInfoNotInstalledError as ex: pass print( "test_pdfinfo_not_installed_throws: {} sec".format( (time.time() - start_time) 
/ 14.0 ) ) @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_missingfonterror_throws(self): start_time = time.time() try: images_from_path = convert_from_path("./tests/test_strict.pdf", strict=True) raise Exception("This should not happen") except PDFSyntaxError as ex: pass print("test_syntaxerror_throws: {} sec".format(time.time() - start_time)) # Test transparent @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_using_transparent(self): start_time = time.time() with open("./tests/test.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes( pdf_file.read(), transparent=True, fmt="png" ) self.assertTrue(len(images_from_bytes) == 1) print( "test_conversion_from_bytes_using_transparent: {} sec".format( time.time() - start_time ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_using_transparent(self): start_time = time.time() images_from_path = convert_from_path( "./tests/test.pdf", transparent=True, fmt="png" ) self.assertTrue(len(images_from_path) == 1) print( "test_conversion_from_path_using_transparent: {} sec".format( time.time() - start_time ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_using_dir_and_transparent(self): start_time = time.time() with TemporaryDirectory() as path: with open("./tests/test.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes( pdf_file.read(), output_folder=path, transparent=True, fmt="png" ) self.assertTrue(len(images_from_bytes) == 1) [im.close() for im in images_from_bytes] print( "test_conversion_from_bytes_using_dir_and_transparent: {} sec".format( time.time() - start_time ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_using_dir_and_transparent(self): start_time = time.time() with TemporaryDirectory() as path: images_from_path = 
convert_from_path( "./tests/test.pdf", output_folder=path, transparent=True, fmt="png" ) self.assertTrue(len(images_from_path) == 1) [im.close() for im in images_from_path] print( "test_conversion_from_path_using_dir_and_transparent: {} sec".format( time.time() - start_time ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_transparent_without_png(self): start_time = time.time() images_from_path = convert_from_path("./tests/test.pdf", transparent=True) self.assertTrue(len(images_from_path) == 1) [im.close() for im in images_from_path] print( "test_conversion_from_path_using_transparent_without_png: {} sec".format( time.time() - start_time ) ) ## Test output as TIFF @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_to_tiff_from_bytes(self): start_time = time.time() with open("./tests/test.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes(pdf_file.read(), fmt="tiff") self.assertTrue(images_from_bytes[0].format == "TIFF") print( "test_conversion_to_tiff_from_bytes_14: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_to_tiff_from_path_using_dir(self): start_time = time.time() with TemporaryDirectory() as path: images_from_path = convert_from_path( "./tests/test.pdf", output_folder=path, fmt="tiff" ) self.assertTrue(images_from_path[0].format == "TIFF") [im.close() for im in images_from_path] print( "test_conversion_to_tiff_from_path_using_dir_14: {} sec".format( (time.time() - start_time) / 14.0 ) ) ## Test hanging file handles @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") @unittest.skipIf(not os.name == "posix", "This test only works on posix systems") def test_close_tempfile_after_conversion(self): start_time = time.time() with open("./tests/test.pdf", "rb") as pdf_file: fd_count_before = len( subprocess.check_output( 
["ls", "-l", "/proc/" + str(os.getpid()) + "/fd"] ) .decode("utf8") .split("\n") ) pdf_data = pdf_file.read() images_from_bytes = [] for i in range(50): images_from_bytes.extend(convert_from_bytes(pdf_data)) # Closing the images [im.close() for im in images_from_bytes] pid = os.getpid() fd_count_after = len( subprocess.check_output( ["ls", "-l", "/proc/" + str(os.getpid()) + "/fd"] ) .decode("utf8") .split("\n") ) # Add an error margin self.assertTrue(abs(fd_count_before - fd_count_after) <= 3) print( "test_close_tempfile_after_conversion: {} sec".format( (time.time() - start_time) ) ) ## Test poppler_path @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") @unittest.skipIf(not os.name == "posix", "This test only works on posix systems") def test_use_poppler_path(self): os.mkdir("./bin") shutil.copy("/usr/bin/pdftoppm", "./bin") shutil.copy("/usr/bin/pdfinfo", "./bin") start_time = time.time() try: images_from_path = convert_from_path( "./tests/test.pdf", poppler_path="./bin" ) finally: shutil.rmtree("./bin") self.assertTrue(len(images_from_path) == 1) [im.close() for im in images_from_path] print( "test_conversion_from_path_using_poppler_path: {} sec".format( (time.time() - start_time) ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") @unittest.skipIf(not os.name == "posix", "This test only works on posix systems") def test_use_poppler_path_with_trailing_slash(self): os.mkdir("./bin") shutil.copy("/usr/bin/pdftoppm", "./bin") shutil.copy("/usr/bin/pdfinfo", "./bin") start_time = time.time() try: images_from_path = convert_from_path( "./tests/test.pdf", poppler_path="./bin/" ) finally: shutil.rmtree("./bin") self.assertTrue(len(images_from_path) == 1) [im.close() for im in images_from_path] print( "test_conversion_from_path_using_poppler_path_with_trailing_slash: {} sec".format( (time.time() - start_time) ) ) ## Test first page greater or equal to last_page @profile @unittest.skipIf(not POPPLER_INSTALLED, 
"Poppler is not installed!") def test_conversion_from_path_14_first_page_1_last_page_1(self): start_time = time.time() images_from_path = convert_from_path( "./tests/test_14.pdf", first_page=1, last_page=1 ) self.assertTrue(len(images_from_path) == 1) print( "test_conversion_from_path_14: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_14_first_page_12_last_page_1(self): start_time = time.time() images_from_path = convert_from_path( "./tests/test_14.pdf", first_page=12, last_page=1 ) self.assertTrue(len(images_from_path) == 0) print( "test_conversion_from_path_14: {} sec".format( (time.time() - start_time) / 14.0 ) ) ## Test singlefile @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_bytes_using_dir_single_file(self): start_time = time.time() with TemporaryDirectory() as path: with open("./tests/test.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes( pdf_file.read(), output_folder=path, output_file="test", single_file=True, ) self.assertTrue(len(images_from_bytes) == 1) self.assertTrue( images_from_bytes[0].filename == os.path.join(path, "test.ppm") ) [im.close() for im in images_from_bytes] print( "test_conversion_from_bytes_using_dir_single_file: {} sec".format( time.time() - start_time ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_using_dir_single_file(self): start_time = time.time() with TemporaryDirectory() as path: images_from_path = convert_from_path( "./tests/test.pdf", output_folder=path, output_file="test", single_file=True, ) self.assertTrue(len(images_from_path) == 1) self.assertTrue( images_from_path[0].filename == os.path.join(path, "test.ppm") ) [im.close() for im in images_from_path] print( "test_conversion_from_path_using_dir_single_file: {} sec".format( time.time() - start_time ) ) @profile 
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_using_dir_14_single_file(self): start_time = time.time() with TemporaryDirectory() as path: images_from_path = convert_from_path( "./tests/test_14.pdf", output_folder=path, output_file="test", single_file=True, ) self.assertTrue(len(images_from_path) == 1) self.assertTrue( images_from_path[0].filename == os.path.join(path, "test.ppm") ) [im.close() for im in images_from_path] print( "test_conversion_from_path_using_dir_14_single_file: {} sec".format( (time.time() - start_time) / 14.0 ) ) ## Test file with same name in directory @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_using_dir_with_containing_file_with_same_name(self): start_time = time.time() with TemporaryDirectory() as path: shutil.copyfile("./tests/test.pdf", os.path.join(path, "test.pdf")) images_from_path = convert_from_path( "./tests/test.pdf", output_folder=path, output_file="test" ) self.assertTrue(len(images_from_path) == 1) self.assertTrue( images_from_path[0].filename == os.path.join(path, "test0001-1.ppm") ) [im.close() for im in images_from_path] print( "test_conversion_from_path_using_dir_single_file: {} sec".format( time.time() - start_time ) ) ## Test grayscale option @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_to_grayscale_from_bytes(self): start_time = time.time() with open("./tests/test_14.pdf", "rb") as pdf_file: images_from_bytes = convert_from_bytes(pdf_file.read(), grayscale=True) self.assertTrue(images_from_bytes[0].mode == "L") print( "test_conversion_to_grayscale_from_bytes_14: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_to_grayscale_from_path(self): start_time = time.time() images_from_path = convert_from_path("./tests/test_14.pdf", grayscale=True) 
self.assertTrue(images_from_path[0].mode == "L") [im.close() for im in images_from_path] print( "test_conversion_to_grayscale_from_path_14: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_to_grayscale_from_path_using_dir(self): start_time = time.time() with TemporaryDirectory() as path: images_from_path = convert_from_path( "./tests/test_14.pdf", output_folder=path, grayscale=True ) self.assertTrue(images_from_path[0].mode == "L") [im.close() for im in images_from_path] print( "test_conversion_to_grayscale_from_path_using_dir_14: {} sec".format( (time.time() - start_time) / 14.0 ) ) ## Test pathlib support @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_pathlib_path_using_dir(self): start_time = time.time() with TemporaryDirectory() as path: images_from_path = convert_from_path( pathlib.Path("./tests/test.pdf"), output_folder=pathlib.Path(path), poppler_path=get_poppler_path(), ) self.assertTrue(len(images_from_path) == 1) [im.close() for im in images_from_path] print( "test_conversion_from_pathlib_path_using_dir: {} sec".format( time.time() - start_time ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_pathlib_path_14(self): start_time = time.time() images_from_path = convert_from_path(pathlib.Path("./tests/test_14.pdf")) self.assertTrue(len(images_from_path) == 14) print( "test_conversion_from_pathlib_path_14: {} sec".format( (time.time() - start_time) / 14.0 ) ) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_pathlib_path_using_dir_14(self): start_time = time.time() with TemporaryDirectory() as path: images_from_path = convert_from_path( pathlib.Path("./tests/test_14.pdf"), output_folder=pathlib.Path(path), poppler_path=get_poppler_path(), ) self.assertTrue(len(images_from_path) == 14) 
[im.close() for im in images_from_path] print( "test_conversion_from_pathlib_path_using_dir_14: {} sec".format( (time.time() - start_time) / 14.0 ) ) ## Test size parameter @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_with_int_size(self): start_time = time.time() images_from_path = convert_from_path("./tests/test.pdf", size=400) self.assertTrue(images_from_path[0].size[1] == 400) self.assertTrue(len(images_from_path) == 1) print("test_conversion_from_path_with_int_size: {} sec".format(time.time() - start_time)) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_with_1d_tuple_size(self): start_time = time.time() images_from_path = convert_from_path("./tests/test.pdf", size=(400,)) self.assertTrue(images_from_path[0].size[1] == 400) self.assertTrue(len(images_from_path) == 1) print("test_conversion_from_path_with_1d_tuple_size: {} sec".format(time.time() - start_time)) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_with_2d_tuple_size(self): start_time = time.time() images_from_path = convert_from_path("./tests/test.pdf", size=(400, 400)) self.assertTrue(images_from_path[0].size == (400, 400)) self.assertTrue(len(images_from_path) == 1) print("test_conversion_from_path_with_2d_tuple_size: {} sec".format(time.time() - start_time)) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_with_invalid_size(self): start_time = time.time() try: images_from_path = convert_from_path("./tests/test.pdf", size='bad value') raise Exception("This should not happen") except ValueError: pass print("test_conversion_from_path_with_invalid_size: {} sec".format(time.time() - start_time)) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_with_2d_tuple_size_with_None_width(self): start_time = 
time.time() images_from_path = convert_from_path("./tests/test.pdf", size=(None, 400)) self.assertTrue(images_from_path[0].size[0] == 310) self.assertTrue(images_from_path[0].size[1] == 400) self.assertTrue(len(images_from_path) == 1) print("test_conversion_from_path_with_2d_tuple_size_with_None_width: {} sec".format(time.time() - start_time)) @profile @unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!") def test_conversion_from_path_with_2d_tuple_size_with_None_height(self): start_time = time.time() images_from_path = convert_from_path("./tests/test.pdf", size=(400, None)) self.assertTrue(images_from_path[0].size[0] == 400) self.assertTrue(images_from_path[0].size[1] == 518) self.assertTrue(len(images_from_path) == 1) print("test_conversion_from_path_with_2d_tuple_size_with_None_width: {} sec".format(time.time() - start_time)) if __name__ == "__main__": unittest.main()
38.989521
118
0.606969
6,308
52,090
4.677077
0.03884
0.068061
0.058841
0.068637
0.941192
0.931804
0.922482
0.89862
0.889469
0.879673
0
0.019532
0.285448
52,090
1,335
119
39.018727
0.773112
0.014398
0
0.600336
0
0
0.168304
0.071752
0
0
0
0
0.074727
1
0.071369
false
0.002519
0.012594
0.00084
0.089001
0.06801
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
1650d598e0eb88f70dbb9f582275f1e837e6057e
85
py
Python
simple_ciphers/utils/strings.py
MarcDillar/python-cipher
7a5e5a74c26b91e57d68b5a03aeb1c53be222da0
[ "MIT" ]
null
null
null
simple_ciphers/utils/strings.py
MarcDillar/python-cipher
7a5e5a74c26b91e57d68b5a03aeb1c53be222da0
[ "MIT" ]
null
null
null
simple_ciphers/utils/strings.py
MarcDillar/python-cipher
7a5e5a74c26b91e57d68b5a03aeb1c53be222da0
[ "MIT" ]
null
null
null
import re def remove_non_letters(s): return re.compile('[^a-zA-Z]').sub('', s)
14.166667
45
0.623529
15
85
3.4
0.866667
0
0
0
0
0
0
0
0
0
0
0
0.152941
85
5
46
17
0.708333
0
0
0
0
0
0.105882
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
167f8de300bb62859fc9b7f4f45bacd759b25edf
166
py
Python
tests/helpers.py
bankroll-py/bankroll-broker-etrade
3f8ba0abbbcdcf5d42ec88fbe618a6218fbf304b
[ "MIT" ]
4
2019-10-08T18:18:56.000Z
2022-02-10T19:23:51.000Z
tests/helpers.py
bankroll-py/bankroll-broker-etrade
3f8ba0abbbcdcf5d42ec88fbe618a6218fbf304b
[ "MIT" ]
null
null
null
tests/helpers.py
bankroll-py/bankroll-broker-etrade
3f8ba0abbbcdcf5d42ec88fbe618a6218fbf304b
[ "MIT" ]
null
null
null
from decimal import Decimal from bankroll.model import Cash, Currency def cashUSD(amount: Decimal) -> Cash: return Cash(currency=Currency.USD, quantity=amount)
23.714286
55
0.777108
22
166
5.863636
0.590909
0.186047
0
0
0
0
0
0
0
0
0
0
0.138554
166
6
56
27.666667
0.902098
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.5
0.25
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
16949c6492eb507a09848dc20bbfc0eb739c52a6
63
py
Python
pyfuseki/syncFuseki/__init__.py
yubinCloud/pyfuseki
15dcd8f313cdd346aadd62c9ab40f23ecc06e4bd
[ "Apache-2.0" ]
21
2021-01-20T08:07:52.000Z
2022-01-26T05:11:24.000Z
pyfuseki/syncFuseki/__init__.py
yubinCloud/pyfuseki
15dcd8f313cdd346aadd62c9ab40f23ecc06e4bd
[ "Apache-2.0" ]
1
2022-01-13T03:29:24.000Z
2022-01-15T08:10:24.000Z
pyfuseki/syncFuseki/__init__.py
yubinCloud/pyfuseki
15dcd8f313cdd346aadd62c9ab40f23ecc06e4bd
[ "Apache-2.0" ]
3
2021-01-24T17:26:02.000Z
2021-05-29T05:00:15.000Z
""" @Time: 2021/1/20 17:12 @Author: @File: __init__.py.py """
9
22
0.587302
11
63
3
0.909091
0
0
0
0
0
0
0
0
0
0
0.203704
0.142857
63
6
23
10.5
0.407407
0.84127
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
6
16a6072c9d04b7ed615bcd226bb73d5ba351cd43
43,836
py
Python
downstream/UNITER/model/pretrain_vcr.py
yeonseok-jeong-cm/multimodal_research
bb1140f13f76d4cda6175a072806a0ee0908bd0d
[ "MIT" ]
null
null
null
downstream/UNITER/model/pretrain_vcr.py
yeonseok-jeong-cm/multimodal_research
bb1140f13f76d4cda6175a072806a0ee0908bd0d
[ "MIT" ]
null
null
null
downstream/UNITER/model/pretrain_vcr.py
yeonseok-jeong-cm/multimodal_research
bb1140f13f76d4cda6175a072806a0ee0908bd0d
[ "MIT" ]
null
null
null
from .pretrain import UniterForPretraining from torch import nn from .layer import BertOnlyMLMHead from collections import defaultdict from torch.nn import functional as F import torch import numpy as np from .do_calculus import Matcher, BoxCoder, BalancedPositiveNegativeSampler, FastRCNNLossComputation from horovod import torch as hvd class UniterForPretrainingForVCR(UniterForPretraining): """ 2nd Stage Pretrain UNITER for VCR """ def init_type_embedding(self): new_emb = nn.Embedding(4, self.uniter.config.hidden_size) new_emb.apply(self.init_weights) for i in [0, 1]: emb = self.uniter.embeddings.token_type_embeddings.weight.data[i, :] new_emb.weight.data[i, :].copy_(emb) emb = self.uniter.embeddings.token_type_embeddings.weight.data[0, :] new_emb.weight.data[2, :].copy_(emb) new_emb.weight.data[3, :].copy_(emb) self.uniter.embeddings.token_type_embeddings = new_emb def init_word_embedding(self, num_special_tokens): orig_word_num = self.uniter.embeddings.word_embeddings.weight.size(0) new_emb = nn.Embedding( orig_word_num + num_special_tokens, self.uniter.config.hidden_size) new_emb.apply(self.init_weights) emb = self.uniter.embeddings.word_embeddings.weight.data new_emb.weight.data[:orig_word_num, :].copy_(emb) self.uniter.embeddings.word_embeddings = new_emb self.cls = BertOnlyMLMHead( self.uniter.config, self.uniter.embeddings.word_embeddings.weight) self.causal_predictor_t = BertOnlyMLMHead(self.uniter.config, self.uniter.embeddings.word_embeddings.weight) def forward(self, batch, task, compute_loss=True): batch = defaultdict(lambda: None, batch) input_ids = batch['input_ids'] position_ids = batch['position_ids'] img_feat = batch['img_feat'] img_pos_feat = batch['img_pos_feat'] attention_mask = batch['attn_masks'] gather_index = batch['gather_index'] txt_type_ids = batch['txt_type_ids'] txt_lens = batch['txt_lens'] num_bbs = batch['num_bbs'] img_soft_labels = batch['img_soft_labels'] if task == 'mlm': txt_labels = batch['txt_labels'] causal_labels = 
batch['causal_labels'] ''' return self.forward_mlm(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, txt_labels, txt_lens, num_bbs, img_soft_labels, compute_loss) return self.forward_mlm_dc(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, txt_labels, causal_labels, txt_lens, num_bbs, img_soft_labels, compute_loss) return self.forward_mlm_dc_all(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, txt_labels, causal_labels, txt_lens, num_bbs, img_soft_labels, compute_loss) ''' return self.forward_mlm_dc_unmasked(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, txt_labels, causal_labels, txt_lens, num_bbs, img_soft_labels, compute_loss) elif task == 'mrfr': img_mask_tgt = batch['img_mask_tgt'] img_masks = batch['img_masks'] mrfr_feat_target = batch['feat_targets'] ### pretrain by vc_feat vc_feat = batch['vc_feat'] mrfr_vc_feat_target = batch['vc_feat_targets'] return self.forward_mrfr(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrfr_feat_target, vc_feat, mrfr_vc_feat_target, txt_lens, num_bbs, img_soft_labels, compute_loss) ''' return self.forward_mrfr_vc(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrfr_feat_target, vc_feat, mrfr_vc_feat_target, txt_lens, num_bbs, img_soft_labels, compute_loss) ''' elif task.startswith('mrc'): img_mask_tgt = batch['img_mask_tgt'] img_masks = batch['img_masks'] mrc_label_target = batch['label_targets'] ### make label for unmasked object token (for 1_2) img_unmask_tgt = batch['img_unmask_tgt'] mrc_label_target_unmasked = batch['label_targets_unmasked'] ### ''' return self.forward_mrc(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrc_label_target, 
img_unmask_tgt, mrc_label_target_unmasked, txt_lens, num_bbs, img_soft_labels, task, compute_loss) return self.forward_mrc_dc(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrc_label_target, img_unmask_tgt, mrc_label_target_unmasked, txt_lens, num_bbs, img_soft_labels, task, compute_loss) return self.forward_mrc_dc_all(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrc_label_target, img_unmask_tgt, mrc_label_target_unmasked, txt_lens, num_bbs, img_soft_labels, task, compute_loss) ''' return self.forward_mrc_dc_unmasked(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrc_label_target, img_unmask_tgt, mrc_label_target_unmasked, txt_lens, num_bbs, img_soft_labels, task, compute_loss) elif task.startswith('dc'): img_mask_tgt = batch['img_mask_tgt'] img_masks = batch['img_masks'] mrc_label_target = batch['label_targets'] ''' return self.forward_dc_1(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrc_label_target, txt_lens, num_bbs, img_soft_labels, task, compute_loss) return self.forward_dc_2(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrc_label_target, txt_lens, num_bbs, img_soft_labels, task, compute_loss) return self.forward_dc_3(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrc_label_target, txt_lens, num_bbs, img_soft_labels, task, compute_loss) return self.forward_dc_4(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, mrc_label_target, txt_lens, num_bbs, img_soft_labels, task, compute_loss) return self.forward_dc_5(input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, 
attention_mask, gather_index, img_masks, img_mask_tgt, mrc_label_target, txt_lens, num_bbs, img_soft_labels, task, compute_loss) ''' raise ValueError('dc is invalid task now') else: raise ValueError('invalid task') ### use 'do-calculus' in UNITER pretrain : make method def do_calculus_1(self, sequence_output, img_feats, proposals, txt_lens, num_bbs): """ Arguments: - sequence_output : "img + txt" output Returns: """ image_uniter_outputs = [] img_feat_list = [] i = 0 for (output, txt_len) in zip(sequence_output, txt_lens): image_uniter_output = output[txt_len:txt_len+num_bbs[i]] image_uniter_outputs.append(image_uniter_output) i+=1 for (img_feat, num_bb) in zip(img_feats, num_bbs): real_img_feat = img_feat[:num_bb] img_feat_list.append(real_img_feat) assert len(image_uniter_outputs) == len(num_bbs) assert len(image_uniter_outputs) == len(img_feat_list) # class_logits_list = [self.predictor(self_feature) for self_feature in image_uniter_outputs] zs = [self.causal_predictor(img_feat_list[i], [num_bbs[i]]) for i in range(len(image_uniter_outputs))] return image_uniter_outputs, zs def do_calculus_loss_1(self, class_logits_causal_list, proposals, img_soft_labels, compute_loss): matcher = Matcher( 0.7, # cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD = 0.7 0.3, # cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD = 0.3 allow_low_quality_matches=False, ) bbox_reg_weights = (10., 10., 5., 5.) # cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS = (10., 10., 5., 5.) 
box_coder = BoxCoder(weights=bbox_reg_weights) fg_bg_sampler = BalancedPositiveNegativeSampler( 512, 0.25 ) # cfg = MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512 # cfg = MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25 cls_agnostic_bbox_reg = False # cfg.MODEL.CLS_AGNOSTIC_BBOX_REG = False loss_evaluator = FastRCNNLossComputation( matcher, fg_bg_sampler, box_coder, cls_agnostic_bbox_reg ) loss_causal, prediction_list = loss_evaluator( class_logits_causal_list, proposals, img_soft_labels, compute_loss ) return loss_causal, prediction_list ### ### use 'do-calculus' in UNITER pretrain version 2 : make method def do_calculus_2(self, sequence_output, img_feats, proposals, txt_lens, num_bbs): image_uniter_outputs = [] i = 0 for (output, txt_len) in zip(sequence_output, txt_lens): image_uniter_output = output[txt_len:txt_len+num_bbs[i]] image_uniter_outputs.append(image_uniter_output) i+=1 assert len(image_uniter_outputs) == len(num_bbs) # class_logits_list = [self.predictor(self_feature) for self_feature in image_uniter_outputs] zs = [self.causal_predictor_2(image_uniter_outputs[i], [num_bbs[i]]) for i in range(len(image_uniter_outputs))] return zs # MLM def forward_mlm(self, input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, txt_labels, txt_lens, num_bbs, img_soft_labels, compute_loss=True): sequence_output, _ = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False, txt_type_ids=txt_type_ids) ''' ### use 'do-calculus' in UNITER pretrain : compute loss class_logits, class_logits_causal = self.do_calculus(sequence_output, img_pos_feat, txt_lens, num_bbs) loss_classifier, loss_causal = self.do_calculus_loss(class_logits, class_logits_causal, img_pos_feat, img_soft_labels) ### ''' # get only the text part sequence_output = sequence_output[:, :input_ids.size(1), :] # only compute masked tokens for better efficiency masked_output = self._compute_masked_hidden(sequence_output, 
txt_labels != -1) prediction_scores = self.cls(masked_output) if compute_loss: masked_lm_loss = F.cross_entropy(prediction_scores, txt_labels[txt_labels != -1], reduction='none') return masked_lm_loss #, loss_classifier, loss_causal else: return prediction_scores # MLM_DC def forward_mlm_dc(self, input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, txt_labels, causal_labels, txt_lens, num_bbs, img_soft_labels, compute_loss=True): causal_output, masked_output_dc, prediction_scores_dc, causal_loss = 0, 0, 0, 0 sequence_output, _ = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False, txt_type_ids=txt_type_ids) # get only the text part sequence_output = sequence_output[:, :input_ids.size(1), :] # only compute masked tokens for better efficiency # 1> masked token causal_output = self.causal_t(sequence_output) masked_output = self._compute_masked_hidden(causal_output, txt_labels != -1) prediction_scores = self.cls(masked_output) # DeVLBert도 같은 꼬리인가? 
''' # 2> unmasked token, causal inference unmasked_output_dc = self._compute_masked_hidden(causal_output, causal_labels != -1) # 아래 line에서 error 발생 if unmasked_output_dc.size(0) == 0: prediction_scores_dc = None else: prediction_scores_dc = self.causal_predictor_t(unmasked_output_dc) ''' if compute_loss: # masked_lm_loss = F.cross_entropy(prediction_scores, txt_labels[txt_labels != -1], reduction='none') if prediction_scores_dc is not None: masked_lm_loss = F.cross_entropy(prediction_scores, txt_labels[txt_labels != -1], reduction='none') return masked_lm_loss #, loss_classifier, loss_causal else: return prediction_scores # MLM_DC def forward_mlm_dc_all(self, input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, txt_labels, causal_labels, txt_lens, num_bbs, img_soft_labels, compute_loss=True): causal_output, masked_output_dc, prediction_scores_dc, causal_loss = 0, 0, 0, 0 sequence_output, _ = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False, txt_type_ids=txt_type_ids) # get only the text part sequence_output = sequence_output[:, :input_ids.size(1), :] # only compute masked tokens for better efficiency # 1> masked token causal_output = self.causal_t(sequence_output) masked_output = self._compute_masked_hidden(causal_output, txt_labels != -1) prediction_scores = self.cls(masked_output) # DeVLBert도 같은 꼬리인가? 
# 2> unmasked token, causal inference unmasked_output_dc = self._compute_masked_hidden(causal_output, causal_labels != -1) # 아래 line에서 error 발생 if unmasked_output_dc.size(0) == 0: prediction_scores_dc = None else: prediction_scores_dc = self.causal_predictor_t(unmasked_output_dc) if compute_loss: masked_lm_loss = F.cross_entropy(prediction_scores, txt_labels[txt_labels != -1], reduction='none') if prediction_scores_dc is not None: causal_loss = F.cross_entropy(prediction_scores_dc, causal_labels[causal_labels != -1], reduction='none') masked_lm_loss = torch.cat([masked_lm_loss, causal_loss], dim=0) return masked_lm_loss #, loss_classifier, loss_causal else: return prediction_scores # MLM_DC def forward_mlm_dc_unmasked(self, input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, txt_labels, causal_labels, txt_lens, num_bbs, img_soft_labels, compute_loss=True): causal_output, masked_output_dc, prediction_scores_dc, causal_loss = 0, 0, 0, 0 sequence_output, _ = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False, txt_type_ids=txt_type_ids) # get only the text part sequence_output = sequence_output[:, :input_ids.size(1), :] # only compute masked tokens for better efficiency # 1> masked token, likelihood inference masked_output = self._compute_masked_hidden(sequence_output, txt_labels != -1) prediction_scores = self.cls(masked_output) # DeVLBert도 같은 꼬리인가? 
# 2> unmasked token, causal inference causal_output = self.causal_t(sequence_output) unmasked_output_dc = self._compute_masked_hidden(causal_output, causal_labels != -1) # 아래 line에서 error 발생 if unmasked_output_dc.size(0) == 0: prediction_scores_dc = None else: prediction_scores_dc = self.causal_predictor_t(unmasked_output_dc) if compute_loss: masked_lm_loss = F.cross_entropy(prediction_scores, txt_labels[txt_labels != -1], reduction='none') if prediction_scores_dc is not None: causal_loss = F.cross_entropy(prediction_scores_dc, causal_labels[causal_labels != -1], reduction='none') masked_lm_loss = torch.cat([masked_lm_loss, causal_loss], dim=0) return masked_lm_loss #, loss_classifier, loss_causal else: return prediction_scores # MRFR def forward_mrfr(self, input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, feat_targets, vc_feat, mrfr_vc_feat_target, txt_lens, num_bbs, img_soft_labels, compute_loss=True): sequence_output, _ = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False, img_masks=img_masks, txt_type_ids=txt_type_ids) ''' ### use 'do-calculus' in UNITER pretrain : compute loss class_logits, class_logits_causal = self.do_calculus(sequence_output, img_pos_feat, txt_lens, num_bbs) loss_classifier, loss_causal = self.do_calculus_loss(class_logits, class_logits_causal, img_pos_feat, img_soft_labels) ### ''' # only compute masked tokens for better efficiency masked_output = self._compute_masked_hidden(sequence_output, img_mask_tgt) prediction_feat = self.feat_regress(masked_output) if compute_loss: mrfr_loss = F.mse_loss(prediction_feat, feat_targets, reduction='none') return mrfr_loss#, loss_classifier, loss_causal else: return prediction_feat # MRFR def forward_mrfr_vc(self, input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, feat_targets, vc_feat, 
mrfr_vc_feat_target, txt_lens, num_bbs, img_soft_labels, compute_loss=True): sequence_output, _ = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False, img_masks=img_masks, txt_type_ids=txt_type_ids) ''' ### use 'do-calculus' in UNITER pretrain : compute loss class_logits, class_logits_causal = self.do_calculus(sequence_output, img_pos_feat, txt_lens, num_bbs) loss_classifier, loss_causal = self.do_calculus_loss(class_logits, class_logits_causal, img_pos_feat, img_soft_labels) ### ''' #import ipdb;ipdb.set_trace(context=10) # only compute masked tokens for better efficiency masked_output = self._compute_masked_hidden(sequence_output, img_mask_tgt) if vc_feat.shape[-1]==1024: prediction_feat = self.feat_regress_vc(masked_output) feat_targets = mrfr_vc_feat_target else: prediction_feat = self.feat_regress(masked_output) if compute_loss: mrfr_loss = F.mse_loss(prediction_feat, feat_targets, reduction='none') return mrfr_loss#, loss_classifier, loss_causal else: return prediction_feat # MRC def forward_mrc(self, input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, label_targets, img_unmask_tgt, label_targets_unmasked, txt_lens, num_bbs, img_soft_labels, task, compute_loss=True): sequence_output, _ = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False, img_masks=img_masks, txt_type_ids=txt_type_ids) ''' ### use 'do-calculus' in UNITER pretrain : compute loss class_logits, class_logits_causal = self.do_calculus(sequence_output, img_pos_feat, txt_lens, num_bbs) loss_classifier, loss_causal = self.do_calculus_loss(class_logits, class_logits_causal, img_pos_feat, img_soft_labels) ### ''' # only compute masked regions for better efficiency masked_output = self._compute_masked_hidden(sequence_output, img_mask_tgt) prediction_soft_label = 
self.region_classifier(masked_output) if compute_loss: if "kl" in task: prediction_soft_label = F.log_softmax( prediction_soft_label, dim=-1) mrc_loss = F.kl_div( prediction_soft_label, label_targets, reduction='none') else: # background class should not be the target label_targets = torch.max(label_targets[:, 1:], dim=-1)[1] + 1 mrc_loss = F.cross_entropy( prediction_soft_label, label_targets, ignore_index=0, reduction='none') return mrc_loss #, loss_classifier, loss_causal else: return prediction_soft_label # MRC_DC def forward_mrc_dc(self, input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, label_targets, img_unmask_tgt, label_targets_unmasked, txt_lens, num_bbs, img_soft_labels, task, compute_loss=True): sequence_output, _ = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False, img_masks=img_masks, txt_type_ids=txt_type_ids) # 1> masked object, causal inference causal_output = self.causal_v(sequence_output) masked_output = self._compute_masked_hidden(causal_output, img_mask_tgt) prediction_soft_label = self.causal_predictor_v(masked_output) ''' # only compute masked regions for better efficiency masked_output = self._compute_masked_hidden(sequence_output, img_mask_tgt) prediction_soft_label = self.region_classifier(masked_output) ''' if compute_loss: if "kl" in task: prediction_soft_label = F.log_softmax( prediction_soft_label, dim=-1) mrc_loss = F.kl_div( prediction_soft_label, label_targets, reduction='none') else: # background class should not be the target label_targets = torch.max(label_targets[:, 1:], dim=-1)[1] + 1 mrc_loss = F.cross_entropy( prediction_soft_label, label_targets, ignore_index=0, reduction='none') return mrc_loss #, loss_classifier, loss_causal else: return prediction_soft_label # MRC_DC def forward_mrc_dc_all(self, input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, 
gather_index, img_masks, img_mask_tgt, label_targets, img_unmask_tgt, label_targets_unmasked, txt_lens, num_bbs, img_soft_labels, task, compute_loss=True): sequence_output, _ = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False, img_masks=img_masks, txt_type_ids=txt_type_ids) # 1> masked object, causal inference causal_output = self.causal_v(sequence_output) masked_output = self._compute_masked_hidden(causal_output, img_mask_tgt) prediction_soft_label = self.causal_predictor_v(masked_output) # 2> unmasked object, causal inference unmasked_output = self._compute_masked_hidden(causal_output, img_unmask_tgt) prediction_soft_label_unmasked = self.causal_predictor_v(unmasked_output) if compute_loss: if "kl" in task: prediction_soft_label = F.log_softmax( prediction_soft_label, dim=-1) mrc_loss = F.kl_div( prediction_soft_label, label_targets, reduction='none') prediction_soft_label_unmasked = F.log_softmax( prediction_soft_label_unmasked, dim=-1) mrc_loss_unmasked = F.kl_div( prediction_soft_label_unmasked, label_targets_unmasked, reduction='none') mrc_loss = torch.cat([mrc_loss, mrc_loss_unmasked], dim=0) else: # background class should not be the target label_targets = torch.max(label_targets[:, 1:], dim=-1)[1] + 1 mrc_loss = F.cross_entropy( prediction_soft_label, label_targets, ignore_index=0, reduction='none') label_targets_unmasked = torch.max(label_targets_unmasked[:, 1:], dim=-1)[1] + 1 mrc_loss_unmasked = F.cross_entropy( prediction_soft_label_unmasked, label_targets_unmasked, ignore_index=0, reduction='none') mrc_loss = torch.cat([mrc_loss, mrc_loss_unmasked], dim=0) return mrc_loss #, loss_classifier, loss_causal else: return prediction_soft_label def forward_mrc_dc_unmasked(self, input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, label_targets, img_unmask_tgt, label_targets_unmasked, txt_lens, num_bbs, img_soft_labels, 
task, compute_loss=True): sequence_output, _ = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False, img_masks=img_masks, txt_type_ids=txt_type_ids) # 1> masked object, likelihood inference masked_output = self._compute_masked_hidden(sequence_output, img_mask_tgt) prediction_soft_label = self.region_classifier(masked_output) # 2> unmasked object, causal inference causal_output = self.causal_v(sequence_output) unmasked_output = self._compute_masked_hidden(causal_output, img_unmask_tgt) prediction_soft_label_unmasked = self.causal_predictor_v(unmasked_output) if compute_loss: if "kl" in task: '''prediction_soft_label = F.log_softmax( prediction_soft_label, dim=-1) mrc_loss = F.kl_div( prediction_soft_label, label_targets, reduction='none')''' prediction_soft_label_unmasked = F.log_softmax( prediction_soft_label_unmasked, dim=-1) mrc_loss_unmasked = F.kl_div( prediction_soft_label_unmasked, label_targets_unmasked, reduction='none') mrc_loss = mrc_loss_unmasked else: # background class should not be the target '''label_targets = torch.max(label_targets[:, 1:], dim=-1)[1] + 1 mrc_loss = F.cross_entropy( prediction_soft_label, label_targets, ignore_index=0, reduction='none')''' label_targets_unmasked = torch.max(label_targets_unmasked[:, 1:], dim=-1)[1] + 1 mrc_loss_unmasked = F.cross_entropy( prediction_soft_label_unmasked, label_targets_unmasked, ignore_index=0, reduction='none') mrc_loss = mrc_loss_unmasked return mrc_loss #, loss_classifier, loss_causal else: return prediction_soft_label # DC 1 (Do-Calculus 1) def forward_dc_1(self, input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, label_targets, txt_lens, num_bbs, img_soft_labels, task, compute_loss=True): sequence_output, _ = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False, txt_type_ids=txt_type_ids) ### 
use 'do-calculus' in UNITER pretrain : compute loss device = img_pos_feat.device image_uniter_outputs, zs = self.do_calculus(sequence_output, img_feat, img_pos_feat, txt_lens, num_bbs) batch_zs = pad_tensors(zs, num_bbs).to(device) attention_mask_list, gather_index_list = [], [] for i in range(input_ids.size(0)): attention_mask_list.append(attention_mask[i][txt_lens[i]:]) #gather_index_list.append(gather_index[i][txt_lens[i]:]) #attention_mask = attention_mask[:, input_ids.size(1):] #gather_index = gather_index[:, input_ids.size(1):] attention_mask = pad_tensors(attention_mask_list, num_bbs).to(device) # gather_index = pad_tensors(gather_index_list, num_bbs).to(device) gather_index = gather_index[:, input_ids.size(1)] zs_output, _ = self.uniter(None, position_ids, batch_zs, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False, txt_type_ids=txt_type_ids) causal_logits_list = [] yzs = [] i = 0 # 밑에서 일렬로 쫙 펴버리자, 그러면서 label도 펴버리자, 그리고 한 번에 loss를 계산하자 for (uniter_output, z_output) in zip(image_uniter_outputs, zs_output): z = z_output[:num_bbs[i]] i += 1 assert len(uniter_output) == len(z) length = len(uniter_output) yz = torch.cat((self.Wx(uniter_output).unsqueeze(1).repeat(1, length, 1), z.unsqueeze(0).repeat(length, 1, 1)), 2).view(-1, 2*self.Wx(uniter_output).size(1)) yzs.append(yz) causal_logits_list.append(self.causal_score(yz)) if compute_loss: loss_causal = self.do_calculus_loss(causal_logits_list, img_pos_feat, img_soft_labels, compute_loss) return loss_causal else: prediction_soft_label = self.do_calculus_loss(causal_logits_list, img_pos_feat, img_soft_labels, compute_loss) return prediction_soft_label ### # DC 2 (Do-Calculus 2) def forward_dc_2(self, input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, label_targets, txt_lens, num_bbs, img_soft_labels, task, compute_loss=True): sequence_output, _ = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, 
gather_index, output_all_encoded_layers=False, txt_type_ids=txt_type_ids) # # sequence_img_output = sequence_output[:, input_ids.size(1):, :]; import ipdb;ipdb.set_trace(context=10) sequence_img_output = [] for i, sequence in enumerate(sequence_output): sequence_img_output.append(sequence[txt_lens[i]:, :]) class_logits_causal_list = self.causal_predictor_2(sequence_img_output, num_bbs) ### use 'do-calculus' in UNITER pretrain : compute loss device = img_pos_feat.device # batch_zs = pad_tensors(zs, num_bbs).to(device) ''' attention_mask_list, gather_index_list = [], [] for i in range(input_ids.size(0)): attention_mask_list.append(attention_mask[i][txt_lens[i]:]) #gather_index_list.append(gather_index[i][txt_lens[i]:]) #attention_mask = attention_mask[:, input_ids.size(1):] #gather_index = gather_index[:, input_ids.size(1):] attention_mask = pad_tensors(attention_mask_list, num_bbs).to(device) # gather_index = pad_tensors(gather_index_list, num_bbs).to(device) gather_index = gather_index[:, input_ids.size(1)] zs_output = self.uniter(None, position_ids, batch_zs, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False, txt_type_ids=txt_type_ids) causal_logits_list = [] yzs = [] i = 0 # 밑에서 일렬로 쫙 펴버리자, 그러면서 label도 펴버리자, 그리고 한 번에 loss를 계산하자 for (uniter_output, z_output) in zip(image_uniter_outputs, zs_output): z = z_output[:num_bbs[i]] i += 1 assert len(uniter_output) == len(z) length = len(uniter_output) yz = torch.cat((self.Wx_1(uniter_output).unsqueeze(1).repeat(1, length, 1), z.unsqueeze(0).repeat(length, 1, 1)), 2).view(-1, 2*self.Wx(uniter_output).size(1)) yzs.append(yz) causal_logits_list.append(self.causal_score(yz)) if compute_loss: loss_causal = self.do_calculus_loss_1(class_logits_causal_list, img_pos_feat, img_soft_labels, compute_loss) return loss_causal else: prediction_soft_label = self.do_calculus_loss_1(class_logits_causal_list, img_pos_feat, img_soft_labels, compute_loss) return prediction_soft_label ''' loss_causal = 
self.do_calculus_loss_1(class_logits_causal_list, img_pos_feat, img_soft_labels, True) prediction_soft_label = self.do_calculus_loss_1(class_logits_causal_list, img_pos_feat, img_soft_labels, False) return loss_causal, prediction_soft_label ### # DC 3 (Do-Calculus 3) def forward_dc_3(self, input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, label_targets, txt_lens, num_bbs, img_soft_labels, task, compute_loss=True): _, embedding_output = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False, txt_type_ids=txt_type_ids) # # sequence_img_output = sequence_output[:, input_ids.size(1):, :]; import ipdb;ipdb.set_trace(context=10) img_emb_list = [] for i, sequence in enumerate(embedding_output): img_emb_list.append(sequence[txt_lens[i]:, :]) class_logits_causal_list, label_list = self.causal_predictor_2(img_emb_list, num_bbs, img_soft_labels) ### use 'do-calculus' in UNITER pretrain : compute loss device = img_pos_feat.device # batch_zs = pad_tensors(zs, num_bbs).to(device) ''' if compute_loss: loss_causal = self.do_calculus_loss_1(class_logits_causal_list, img_pos_feat, img_soft_labels, compute_loss) return loss_causal else: prediction_soft_label = self.do_calculus_loss_1(class_logits_causal_list, img_pos_feat, img_soft_labels, compute_loss) return prediction_soft_label''' ### loss_causal, prediction_soft_label = self.do_calculus_loss_1(class_logits_causal_list, img_pos_feat, img_soft_labels, True) return loss_causal, prediction_soft_label, label_list # DC 4 (Do-Calculus 4 : confounder linear transformation) def forward_dc_4(self, input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, label_targets, txt_lens, num_bbs, img_soft_labels, task, compute_loss=True): _, embedding_output = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, 
output_all_encoded_layers=False, txt_type_ids=txt_type_ids) # # sequence_img_output = sequence_output[:, input_ids.size(1):, :]; import ipdb;ipdb.set_trace(context=10) img_emb_list = [] for i, sequence in enumerate(embedding_output): img_emb_list.append(sequence[txt_lens[i]:, :]) class_logits_causal_list, label_list = self.causal_predictor_3(img_emb_list, num_bbs, img_soft_labels) ### use 'do-calculus' in UNITER pretrain : compute loss device = img_pos_feat.device loss_causal, prediction_soft_label = self.do_calculus_loss_1(class_logits_causal_list, img_pos_feat, img_soft_labels, True) return loss_causal, prediction_soft_label, label_list # DC 5 (Do-Calculus 5 : ) def forward_dc_5(self, input_ids, position_ids, txt_type_ids, img_feat, img_pos_feat, attention_mask, gather_index, img_masks, img_mask_tgt, label_targets, txt_lens, num_bbs, img_soft_labels, task, compute_loss=True): _, embedding_output = self.uniter(input_ids, position_ids, img_feat, img_pos_feat, attention_mask, gather_index, output_all_encoded_layers=False, txt_type_ids=txt_type_ids) # # sequence_img_output = sequence_output[:, input_ids.size(1):, :]; import ipdb;ipdb.set_trace(context=10) img_emb_list = [] for i, sequence in enumerate(embedding_output): img_emb_list.append(sequence[txt_lens[i]:, :]) device = torch.device("cuda", hvd.local_rank()) self.causal_predictor_3.to(device) class_logits_causal_list, label_list = self.causal_predictor_3(img_emb_list, num_bbs, img_soft_labels) ### use 'do-calculus' in UNITER pretrain : compute loss loss_causal, prediction_soft_label = self.do_calculus_loss_1(class_logits_causal_list, img_pos_feat, img_soft_labels, True) return loss_causal, prediction_soft_label, label_list def cat(tensors, dim=0): """ Efficient version of torch.cat that avoids a copy if there is only a single element in a list """ assert isinstance(tensors, (list, tuple)) if len(tensors) == 1: return tensors[0] return torch.cat(tensors, dim) def pad_tensors(tensors, lens=None, pad=0): """B x [T, 
...]""" if lens is None: pass max_len = max(lens) bs = len(tensors) hid = tensors[0].size(-1) dtype = tensors[0].dtype output = torch.zeros(bs, max_len, hid, dtype=dtype) if pad: output.data.fill_(pad) if len(tensors[0].shape) > 1: for i, (t, l) in enumerate(zip(tensors, lens)): output.data[i, :l, ...] = t.data else: lens = [tensors[i].size(0) for i in range(len(tensors))] # max_len = max(lens) output = torch.zeros(bs, max_len, dtype=dtype) for i, (t, l) in enumerate(zip(tensors, lens)): if l > max_len: l = max_len output.data[i, :l] = t.data[:l] return output
49.757094
171
0.591295
5,172
43,836
4.591261
0.050657
0.018445
0.030742
0.038406
0.881622
0.868146
0.852228
0.840605
0.836899
0.828097
0
0.008016
0.334086
43,836
880
172
49.813636
0.805454
0.081691
0
0.669903
0
0
0.013499
0.000712
0
0
0
0
0.009709
1
0.04466
false
0.001942
0.017476
0
0.132039
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
16ab538ab9c32426ccd8e3a11b89e1a76b8ef291
25,621
py
Python
tuesmon_ncurses/controllers/milestones.py
tuesmoncom/tuesmon-ncurses
21cd4c39f2eed13e7fe42cac0e70d752f76382ca
[ "Apache-2.0" ]
null
null
null
tuesmon_ncurses/controllers/milestones.py
tuesmoncom/tuesmon-ncurses
21cd4c39f2eed13e7fe42cac0e70d752f76382ca
[ "Apache-2.0" ]
null
null
null
tuesmon_ncurses/controllers/milestones.py
tuesmoncom/tuesmon-ncurses
21cd4c39f2eed13e7fe42cac0e70d752f76382ca
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- """ tuesmon_ncurses.controllers.milestone ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ """ from concurrent.futures import wait import functools from tuesmon_ncurses.config import settings from tuesmon_ncurses.ui import signals from tuesmon_ncurses.ui.widgets.milestones import UserStoryEntry, TaskEntry import tuesmon_ncurses.data from . import base class ProjectMilestoneSubController(base.Controller): def __init__(self, view, executor, state_machine): self.view = view self.executor = executor self.state_machine = state_machine self.view.taskboard.on_task_status_change = self.handle_change_task_status_request self.view.taskboard.on_task_assigned_to_change = self.handle_change_task_assigned_to_request self.view.taskboard.on_user_story_status_change = self.handle_change_user_story_status_request self.view.taskboard.on_user_story_points_change = self.handle_change_user_story_points_request def handle(self, key): if key == settings.data.milestone.keys.create_user_story: self.new_user_story() elif key == settings.data.milestone.keys.create_task: self.new_task() elif key == settings.data.milestone.keys.edit: self.edit_user_story_or_task() elif key == settings.data.milestone.keys.delete: self.delete_user_story_or_task() elif key == settings.data.milestone.keys.change_to_milestone: self.change_to_milestone() elif key == settings.data.milestone.keys.reload: self.load() elif key == settings.data.milestone.keys.help: self.help_info() else: super().handle(key) def load(self): self.state_machine.transition(self.state_machine.PROJECT_MILESTONES) self.view.notifier.info_msg("Fetching Stats and User stories") if hasattr(self, "milestone"): current_milestone = self.milestone else: current_milestone = tuesmon_ncurses.data.current_milestone(self.view._project) milestone_f = self.executor.milestone(current_milestone, self.view._project) milestone_f.add_done_callback(self.handle_milestone) milestone_stats_f = self.executor.milestone_stats(current_milestone, 
self.view._project) milestone_stats_f.add_done_callback(self.handle_milestone_stats) user_stories_f = self.executor.user_stories(current_milestone, self.view._project) user_stories_f.add_done_callback(self.handle_user_stories) tasks_f = self.executor.tasks(current_milestone, self.view._project) tasks_f.add_done_callback(self.handle_tasks) futures = (tasks_f, user_stories_f) futures_completed_f = self.executor.pool.submit(lambda : wait(futures, 10)) futures_completed_f.add_done_callback(functools.partial(self.handle_user_stories_and_task_info_fetched, info_msg="User stories and tasks fetched", error_msg="Failed to fetch milestone data " "(user stories or task)")) def new_user_story(self): user_story = {"milestone": self.view._milestone.get("id", None)} self.view.open_user_story_form(user_story=user_story) signals.connect(self.view.user_story_form.cancel_button, "click", lambda _: self.cancel_user_story_form()) signals.connect(self.view.user_story_form.save_button, "click", lambda _: self.handle_create_user_story_request()) def new_task(self): selected_item = self.view.taskboard.widget.get_focus()[0] if isinstance(selected_item, UserStoryEntry): task = {"user_story": selected_item.user_story.get("id", None)} elif isinstance(selected_item, TaskEntry): task = {"user_story": selected_item.task.get("user_story", None)} else: task = {"user_story": None} self.view.open_task_form(task=task) signals.connect(self.view.task_form.cancel_button, "click", lambda _: self.cancel_task_form()) signals.connect(self.view.task_form.save_button, "click", lambda _: self.handle_create_task_request()) def edit_user_story_or_task(self): selected_item = self.view.taskboard.widget.get_focus()[0] if isinstance(selected_item, UserStoryEntry): self.view.open_user_story_form(user_story=selected_item.user_story) signals.connect(self.view.user_story_form.cancel_button, "click", lambda _: self.cancel_user_story_form()) signals.connect(self.view.user_story_form.save_button, "click", lambda _: 
self.handle_edit_user_story_request(selected_item.user_story)) elif isinstance(selected_item, TaskEntry): self.view.open_task_form(task=selected_item.task) signals.connect(self.view.task_form.cancel_button, "click", lambda _: self.cancel_task_form()) signals.connect(self.view.task_form.save_button, "click", lambda _: self.handle_edit_task_request(selected_item.task)) def cancel_user_story_form(self): self.view.close_user_story_form() def cancel_task_form(self): self.view.close_task_form() def delete_user_story_or_task(self): selected_item = self.view.taskboard.widget.get_focus()[0] if isinstance(selected_item, UserStoryEntry): uss_delete_f = self.executor.delete_user_story(selected_item.user_story) uss_delete_f.add_done_callback(self.handle_delete_user_story_response) elif isinstance(selected_item, TaskEntry): task_delete_f = self.executor.delete_task(selected_item.task) task_delete_f.add_done_callback(self.handle_delete_task_response) def change_to_milestone(self): self.view.open_milestones_selector_popup(current_milestone=self.view._milestone) signals.connect(self.view.milestone_selector_popup.cancel_button, "click", lambda _: self.cancel_milestone_selector_popup()) for option in self.view.milestone_selector_popup.options: signals.connect(option, "click", functools.partial(self.handle_change_to_milestone)) def cancel_milestone_selector_popup(self): self.view.close_milestone_selector_popup() def help_info(self): self.view.open_help_popup() signals.connect(self.view.help_popup.close_button, "click", lambda _: self.close_help_info()) def close_help_info(self): self.view.close_help_popup() def handle_milestone(self, future): self.view._milestone = future.result() if self.view._milestone: self.view.info.populate(self.view._milestone) self.state_machine.refresh() def handle_milestone_stats(self, future): self.view._milestone_stats = future.result() if self.view._milestone_stats: self.view.stats.populate(self.view._milestone_stats) def handle_user_stories(self, future): 
self.view._user_stories = future.result() def handle_tasks(self, future): self.view._tasks = future.result() def handle_user_stories_and_task_info_fetched(self, future_with_results, info_msg=None, error_msg=None): done, not_done = future_with_results.result() if len(done) == 2: self.view.taskboard.populate(self.view._user_stories, self.view._tasks) if info_msg: self.view.notifier.info_msg(info_msg) self.state_machine.refresh() else: # TODO retry failed operations if error_msg: self.view.notifier.error_msg(error_msg) def handle_create_user_story_request(self): data = self.view.get_user_story_form_data() if not data.get("subject", None): self.view.notifier.error_msg("Subject is required") else: us_post_f = self.executor.create_user_story(data) us_post_f.add_done_callback(self.handle_create_user_story_response) def handle_create_user_story_response(self, future): response = future.result() if response is None: self.view.notifier.error_msg("Create US error") else: self.view.notifier.info_msg("Create US successful!") self.view.close_user_story_form() if hasattr(self, "milestone"): current_milestone = self.milestone else: current_milestone = tuesmon_ncurses.data.current_milestone(self.view._project) milestone_f = self.executor.milestone(current_milestone, self.view._project) milestone_f.add_done_callback(self.handle_milestone) milestone_stats_f = self.executor.milestone_stats(current_milestone, self.view._project) milestone_stats_f.add_done_callback(self.handle_milestone_stats) user_stories_f = self.executor.user_stories(current_milestone, self.view._project) user_stories_f.add_done_callback(self.handle_user_stories) tasks_f = self.executor.tasks(current_milestone, self.view._project) tasks_f.add_done_callback(self.handle_tasks) futures = (tasks_f, user_stories_f) futures_completed_f = self.executor.pool.submit(lambda : wait(futures, 10)) futures_completed_f.add_done_callback(self.handle_user_stories_and_task_info_fetched) def handle_create_task_request(self): data = 
self.view.get_task_form_data() if not data.get("subject", None): self.view.notifier.error_msg("Subject is required") else: task_post_f = self.executor.create_task(data) task_post_f.add_done_callback(self.handle_create_task_response) def handle_create_task_response(self, future): response = future.result() if response is None: self.view.notifier.error_msg("Create task error") else: self.view.notifier.info_msg("Create task successful!") self.view.close_task_form() if hasattr(self, "milestone"): current_milestone = self.milestone else: current_milestone = tuesmon_ncurses.data.current_milestone(self.view._project) milestone_f = self.executor.milestone(current_milestone, self.view._project) milestone_f.add_done_callback(self.handle_milestone) milestone_stats_f = self.executor.milestone_stats(current_milestone, self.view._project) milestone_stats_f.add_done_callback(self.handle_milestone_stats) user_stories_f = self.executor.user_stories(current_milestone, self.view._project) user_stories_f.add_done_callback(self.handle_user_stories) tasks_f = self.executor.tasks(current_milestone, self.view._project) tasks_f.add_done_callback(self.handle_tasks) futures = (tasks_f, user_stories_f) futures_completed_f = self.executor.pool.submit(lambda : wait(futures, 10)) futures_completed_f.add_done_callback(self.handle_user_stories_and_task_info_fetched) def handle_edit_user_story_request(self, user_story): data = self.view.get_user_story_form_data() if not data.get("subject", None): self.view.notifier.error_msg("Subject is required") else: us_patch_f = self.executor.update_user_story(user_story, data) us_patch_f.add_done_callback(self.handle_edit_user_story_response) def handle_edit_user_story_response(self, future): response = future.result() if response is None: self.view.notifier.error_msg("Edit error") else: self.view.notifier.info_msg("Edit US successful!") self.view.close_user_story_form() if hasattr(self, "milestone"): current_milestone = self.milestone else: current_milestone 
= tuesmon_ncurses.data.current_milestone(self.view._project) milestone_f = self.executor.milestone(current_milestone, self.view._project) milestone_f.add_done_callback(self.handle_milestone) milestone_stats_f = self.executor.milestone_stats(current_milestone, self.view._project) milestone_stats_f.add_done_callback(self.handle_milestone_stats) user_stories_f = self.executor.user_stories(current_milestone, self.view._project) user_stories_f.add_done_callback(self.handle_user_stories) tasks_f = self.executor.tasks(current_milestone, self.view._project) tasks_f.add_done_callback(self.handle_tasks) futures = (tasks_f, user_stories_f) futures_completed_f = self.executor.pool.submit(lambda : wait(futures, 10)) futures_completed_f.add_done_callback(self.handle_user_stories_and_task_info_fetched) def handle_edit_task_request(self, task): data = self.view.get_task_form_data() if not data.get("subject", None): self.view.notifier.error_msg("Subject is required") else: task_patch_f = self.executor.update_task(task, data) task_patch_f.add_done_callback(self.handle_edit_task_response) def handle_edit_task_response(self, future): response = future.result() if response is None: self.view.notifier.error_msg("Edit error") else: self.view.notifier.info_msg("Edit task successful!") self.view.close_task_form() if hasattr(self, "milestone"): current_milestone = self.milestone else: current_milestone = tuesmon_ncurses.data.current_milestone(self.view._project) milestone_f = self.executor.milestone(current_milestone, self.view._project) milestone_f.add_done_callback(self.handle_milestone) milestone_stats_f = self.executor.milestone_stats(current_milestone, self.view._project) milestone_stats_f.add_done_callback(self.handle_milestone_stats) user_stories_f = self.executor.user_stories(current_milestone, self.view._project) user_stories_f.add_done_callback(self.handle_user_stories) tasks_f = self.executor.tasks(current_milestone, self.view._project) 
tasks_f.add_done_callback(self.handle_tasks) futures = (tasks_f, user_stories_f) futures_completed_f = self.executor.pool.submit(lambda : wait(futures, 10)) futures_completed_f.add_done_callback(self.handle_user_stories_and_task_info_fetched) def handle_delete_user_story_response(self, future): response = future.result() if response is None: self.view.notifier.error_msg("Error deleting user_story") else: self.view.notifier.info_msg("Delete user story") if hasattr(self, "milestone"): current_milestone = self.milestone else: current_milestone = tuesmon_ncurses.data.current_milestone(self.view._project) milestone_f = self.executor.milestone(current_milestone, self.view._project) milestone_f.add_done_callback(self.handle_milestone) milestone_stats_f = self.executor.milestone_stats(current_milestone, self.view._project) milestone_stats_f.add_done_callback(self.handle_milestone_stats) user_stories_f = self.executor.user_stories(current_milestone, self.view._project) user_stories_f.add_done_callback(self.handle_user_stories) tasks_f = self.executor.tasks(current_milestone, self.view._project) tasks_f.add_done_callback(self.handle_tasks) futures = (tasks_f, user_stories_f) futures_completed_f = self.executor.pool.submit(lambda : wait(futures, 10)) futures_completed_f.add_done_callback(self.handle_user_stories_and_task_info_fetched) def handle_delete_task_response(self, future): response = future.result() if response is None: self.view.notifier.error_msg("Error deleting task") else: self.view.notifier.info_msg("Delete task") if hasattr(self, "milestone"): current_milestone = self.milestone else: current_milestone = tuesmon_ncurses.data.current_milestone(self.view._project) milestone_f = self.executor.milestone(current_milestone, self.view._project) milestone_f.add_done_callback(self.handle_milestone) milestone_stats_f = self.executor.milestone_stats(current_milestone, self.view._project) milestone_stats_f.add_done_callback(self.handle_milestone_stats) user_stories_f = 
self.executor.user_stories(current_milestone, self.view._project) user_stories_f.add_done_callback(self.handle_user_stories) tasks_f = self.executor.tasks(current_milestone, self.view._project) tasks_f.add_done_callback(self.handle_tasks) futures = (tasks_f, user_stories_f) futures_completed_f = self.executor.pool.submit(lambda : wait(futures, 10)) futures_completed_f.add_done_callback(self.handle_user_stories_and_task_info_fetched) def handle_change_to_milestone(self, selected_option): self.view.notifier.info_msg("Change to milestone '{}'".format(selected_option.milestone["name"])) milestone = selected_option.milestone milestone_f = self.executor.milestone(milestone, self.view._project) milestone_f.add_done_callback(self.handle_milestone) milestone_stats_f = self.executor.milestone_stats(milestone, self.view._project) milestone_stats_f.add_done_callback(self.handle_milestone_stats) user_stories_f = self.executor.user_stories(milestone, self.view._project) user_stories_f.add_done_callback(self.handle_user_stories) tasks_f = self.executor.tasks(milestone, self.view._project) tasks_f.add_done_callback(self.handle_tasks) futures = (tasks_f, user_stories_f) futures_completed_f = self.executor.pool.submit(lambda : wait(futures, 10)) futures_completed_f.add_done_callback(self.handle_user_stories_and_task_info_fetched) self.cancel_milestone_selector_popup() def handle_change_task_status_request(self, combo, item, state, user_data=None): data = {"status": item.value} task = user_data task_patch_f = self.executor.update_task(task, data) task_patch_f.add_done_callback(self.handle_change_task_status_response) def handle_change_task_status_response(self, future): response = future.result() if response is None: self.view.notifier.error_msg("Change task status with errors") # TODO: Select old value else: self.view.notifier.info_msg("Change task status successful!") if hasattr(self, "milestone"): current_milestone = self.milestone else: current_milestone = 
tuesmon_ncurses.data.current_milestone(self.view._project) milestone_f = self.executor.milestone(current_milestone, self.view._project) milestone_f.add_done_callback(self.handle_milestone) milestone_stats_f = self.executor.milestone_stats(current_milestone, self.view._project) milestone_stats_f.add_done_callback(self.handle_milestone_stats) user_stories_f = self.executor.user_stories(current_milestone, self.view._project) user_stories_f.add_done_callback(self.handle_user_stories) tasks_f = self.executor.tasks(current_milestone, self.view._project) tasks_f.add_done_callback(self.handle_tasks) futures = (tasks_f, user_stories_f) futures_completed_f = self.executor.pool.submit(lambda : wait(futures, 10)) futures_completed_f.add_done_callback(self.handle_user_stories_and_task_info_fetched) def handle_change_task_assigned_to_request(self, combo, item, state, user_data=None): data = {"assigned_to": item.value} task = user_data task_patch_f = self.executor.update_task(task, data) task_patch_f.add_done_callback(self.handle_change_task_assigned_to_response) def handle_change_task_assigned_to_response(self, future): response = future.result() if response is None: self.view.notifier.error_msg("Change task assignation with errors") # TODO: Select old value else: self.view.notifier.info_msg("Change task assignation successful!") if hasattr(self, "milestone"): current_milestone = self.milestone else: current_milestone = tuesmon_ncurses.data.current_milestone(self.view._project) milestone_f = self.executor.milestone(current_milestone, self.view._project) milestone_f.add_done_callback(self.handle_milestone) milestone_stats_f = self.executor.milestone_stats(current_milestone, self.view._project) milestone_stats_f.add_done_callback(self.handle_milestone_stats) user_stories_f = self.executor.user_stories(current_milestone, self.view._project) user_stories_f.add_done_callback(self.handle_user_stories) tasks_f = self.executor.tasks(current_milestone, self.view._project) 
tasks_f.add_done_callback(self.handle_tasks) futures = (tasks_f, user_stories_f) futures_completed_f = self.executor.pool.submit(lambda : wait(futures, 10)) futures_completed_f.add_done_callback(self.handle_user_stories_and_task_info_fetched) def handle_change_user_story_status_request(self, combo, item, state, user_data=None): data = {"status": item.value} user_story = user_data user_story_patch_f = self.executor.update_user_story(user_story, data) user_story_patch_f.add_done_callback(self.handle_change_user_story_status_response) def handle_change_user_story_status_response(self, future): response = future.result() if response is None: self.view.notifier.error_msg("Change user story status with errors") # TODO: Select old value else: self.view.notifier.info_msg("Change user story status successful!") if hasattr(self, "milestone"): current_milestone = self.milestone else: current_milestone = tuesmon_ncurses.data.current_milestone(self.view._project) milestone_f = self.executor.milestone(current_milestone, self.view._project) milestone_f.add_done_callback(self.handle_milestone) milestone_stats_f = self.executor.milestone_stats(current_milestone, self.view._project) milestone_stats_f.add_done_callback(self.handle_milestone_stats) user_stories_f = self.executor.user_stories(current_milestone, self.view._project) user_stories_f.add_done_callback(self.handle_user_stories) tasks_f = self.executor.tasks(current_milestone, self.view._project) tasks_f.add_done_callback(self.handle_tasks) futures = (tasks_f, user_stories_f) futures_completed_f = self.executor.pool.submit(lambda : wait(futures, 10)) futures_completed_f.add_done_callback(self.handle_user_stories_and_task_info_fetched) def handle_change_user_story_points_request(self, combo, item, state, user_data=None): user_story, role_id = user_data data = {"points": {role_id: item.value}} user_story_patch_f = self.executor.update_user_story(user_story, data) 
user_story_patch_f.add_done_callback(self.handle_change_user_story_points_response) def handle_change_user_story_points_response(self, future): response = future.result() if response is None: self.view.notifier.error_msg("Change user story points with errors") # TODO: Select old value else: self.view.notifier.info_msg("Change user story points successful!") if hasattr(self, "milestone"): current_milestone = self.milestone else: current_milestone = tuesmon_ncurses.data.current_milestone(self.view._project) milestone_f = self.executor.milestone(current_milestone, self.view._project) milestone_f.add_done_callback(self.handle_milestone) milestone_stats_f = self.executor.milestone_stats(current_milestone, self.view._project) milestone_stats_f.add_done_callback(self.handle_milestone_stats) user_stories_f = self.executor.user_stories(current_milestone, self.view._project) user_stories_f.add_done_callback(self.handle_user_stories) tasks_f = self.executor.tasks(current_milestone, self.view._project) tasks_f.add_done_callback(self.handle_tasks) futures = (tasks_f, user_stories_f) futures_completed_f = self.executor.pool.submit(lambda : wait(futures, 10)) futures_completed_f.add_done_callback(self.handle_user_stories_and_task_info_fetched)
44.480903
111
0.691269
3,168
25,621
5.212753
0.044192
0.067821
0.055105
0.067821
0.863146
0.817609
0.76965
0.755177
0.726717
0.724234
0
0.001458
0.223645
25,621
575
112
44.558261
0.828767
0.00847
0
0.628019
0
0
0.037966
0
0
0
0
0.001739
0
1
0.089372
false
0
0.016908
0
0.108696
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
16bea6ecc5bee0324d0cf7d99003ac3083ccce9a
400
py
Python
python27/Basic/tempDel.py
userYKK/pythonTry
ecd36d4e49e12e5376e275159cfbc2eb6c7b64d2
[ "Apache-2.0" ]
null
null
null
python27/Basic/tempDel.py
userYKK/pythonTry
ecd36d4e49e12e5376e275159cfbc2eb6c7b64d2
[ "Apache-2.0" ]
null
null
null
python27/Basic/tempDel.py
userYKK/pythonTry
ecd36d4e49e12e5376e275159cfbc2eb6c7b64d2
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python # -*- coding: UTF-8 -*- # 变量类型 print "############################## 1. 多个变量赋值" print "############################## 2. 标准数据类型" print "############################## 3. 删除对象引用" print "############################## 4. 字符串" print "############################## 5. List列表" print "############################## 6. 元组" print "############################## 7. 字典"
17.391304
49
0.2475
28
400
3.535714
0.785714
0
0
0
0
0
0
0
0
0
0
0.023188
0.1375
400
22
50
18.181818
0.263768
0.1075
0
0
0
0
0.779661
0.59322
0
0
0
0
0
0
null
null
0
0
null
null
1
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
1
1
null
0
0
0
0
1
0
0
0
0
0
0
1
0
6
bc7f4e0e3927de73171c25113e7ffa5a5e8a2518
19
py
Python
l3ns/utils/__init__.py
OlegJakushkin/l3ns
320184cb03837b9d6d13cb6ff006263ad1a99544
[ "MIT" ]
3
2021-04-02T11:05:54.000Z
2021-12-17T17:46:02.000Z
l3ns/utils/__init__.py
OlegJakushkin/l3ns
320184cb03837b9d6d13cb6ff006263ad1a99544
[ "MIT" ]
1
2020-10-31T08:36:11.000Z
2020-10-31T08:36:11.000Z
l3ns/utils/__init__.py
OlegJakushkin/l3ns
320184cb03837b9d6d13cb6ff006263ad1a99544
[ "MIT" ]
1
2020-06-08T03:48:58.000Z
2020-06-08T03:48:58.000Z
from . import args
9.5
18
0.736842
3
19
4.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.210526
19
1
19
19
0.933333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
bcd77a40af468bcaae999faa06107f102b96e38a
115
py
Python
modules/dials/viewer/__init__.py
jorgediazjr/dials-dev20191018
77d66c719b5746f37af51ad593e2941ed6fbba17
[ "BSD-3-Clause" ]
null
null
null
modules/dials/viewer/__init__.py
jorgediazjr/dials-dev20191018
77d66c719b5746f37af51ad593e2941ed6fbba17
[ "BSD-3-Clause" ]
null
null
null
modules/dials/viewer/__init__.py
jorgediazjr/dials-dev20191018
77d66c719b5746f37af51ad593e2941ed6fbba17
[ "BSD-3-Clause" ]
1
2020-02-04T15:39:06.000Z
2020-02-04T15:39:06.000Z
from __future__ import absolute_import, division, print_function from dials_viewer_ext import * # special import
28.75
64
0.834783
15
115
5.866667
0.733333
0
0
0
0
0
0
0
0
0
0
0
0.130435
115
3
65
38.333333
0.88
0.121739
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0.5
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
1
0
6
4c5558ad5a1c08cef60c689fc69c3143725161c4
29
py
Python
enso/platform/win32/registry/__init__.py
blackdaemon/enso-launcher-continued
346f82811e77caf73560619cdeb16afabfbf1fce
[ "BSD-3-Clause" ]
7
2015-09-19T20:57:32.000Z
2020-12-31T16:34:42.000Z
enso/platform/win32/registry/__init__.py
blackdaemon/enso-launcher-continued
346f82811e77caf73560619cdeb16afabfbf1fce
[ "BSD-3-Clause" ]
21
2015-11-03T23:15:25.000Z
2018-10-11T21:57:45.000Z
enso/platform/win32/registry/__init__.py
blackdaemon/enso-launcher-continued
346f82811e77caf73560619cdeb16afabfbf1fce
[ "BSD-3-Clause" ]
4
2015-09-15T17:18:00.000Z
2021-06-16T07:06:06.000Z
from WindowsRegistry import *
29
29
0.862069
3
29
8.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.103448
29
1
29
29
0.961538
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
4c5d079886cede3c243ff79d8d0f50eee07685b0
33
py
Python
colormate/__init__.py
MrJakeSir/theming
fd572c871fb4fd67cc4f9517558570d652ad1f0c
[ "MIT" ]
3
2021-10-02T02:23:50.000Z
2021-10-02T16:03:33.000Z
colormate/__init__.py
MrJakeSir/themify
fd572c871fb4fd67cc4f9517558570d652ad1f0c
[ "MIT" ]
null
null
null
colormate/__init__.py
MrJakeSir/themify
fd572c871fb4fd67cc4f9517558570d652ad1f0c
[ "MIT" ]
null
null
null
from colormate.main import Theme
16.5
32
0.848485
5
33
5.6
1
0
0
0
0
0
0
0
0
0
0
0
0.121212
33
1
33
33
0.965517
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
d5de1331d127c8d42888667ea498f9bcad69aaef
145
py
Python
tests/builtin/any.py
Slater-Victoroff/pyjaco
89c4e3c46399c5023b0e160005d855a01241c58a
[ "MIT" ]
38
2015-01-01T18:08:59.000Z
2022-02-18T08:57:27.000Z
tests/builtin/any.py
dusty-phillips/pyjaco
066895ae38d1828498e529c1875cb88df6cbc54d
[ "MIT" ]
1
2020-01-08T04:32:52.000Z
2020-01-08T04:32:52.000Z
tests/builtin/any.py
Slater-Victoroff/pyjaco
89c4e3c46399c5023b0e160005d855a01241c58a
[ "MIT" ]
12
2016-03-07T09:30:49.000Z
2021-09-05T20:38:47.000Z
x = [False, 0, 0.0, [], (), dict()] print any([]) print any(x) print any(x + [1]) try: print any() except TypeError, E: print "Fail", E
14.5
35
0.531034
24
145
3.208333
0.5
0.415584
0.233766
0
0
0
0
0
0
0
0
0.036036
0.234483
145
9
36
16.111111
0.657658
0
0
0
0
0
0.027586
0
0
0
0
0
0
0
null
null
0
0
null
null
0.625
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
1
0
6
d5f0ac2403e0470d0b3950da372277c73acbeaf4
148
py
Python
back/auth/__init__.py
jamalg/my_shops
ccba408aae3b7270ace57c608599a194fa9fe4c8
[ "MIT" ]
null
null
null
back/auth/__init__.py
jamalg/my_shops
ccba408aae3b7270ace57c608599a194fa9fe4c8
[ "MIT" ]
null
null
null
back/auth/__init__.py
jamalg/my_shops
ccba408aae3b7270ace57c608599a194fa9fe4c8
[ "MIT" ]
null
null
null
from flask import Flask from flask_bcrypt import Bcrypt flask_bcrypt = Bcrypt() def init_app(app: Flask) -> None: flask_bcrypt.init_app(app)
16.444444
33
0.756757
23
148
4.652174
0.347826
0.308411
0.186916
0
0
0
0
0
0
0
0
0
0.162162
148
8
34
18.5
0.862903
0
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0.4
0
0.6
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
6
913b26072825587d1f96a3ab69d57ddf627eec99
43
py
Python
infoicer/__init__.py
edkirin/infoicer
809b26eb583d5725b5cccf62880a3137fed60a1a
[ "MIT" ]
2
2020-08-17T19:02:36.000Z
2022-01-07T20:28:59.000Z
infoicer/__init__.py
edkirin/infoicer
809b26eb583d5725b5cccf62880a3137fed60a1a
[ "MIT" ]
null
null
null
infoicer/__init__.py
edkirin/infoicer
809b26eb583d5725b5cccf62880a3137fed60a1a
[ "MIT" ]
null
null
null
from .infoicer import Invoice, InvoiceItem
21.5
42
0.837209
5
43
7.2
1
0
0
0
0
0
0
0
0
0
0
0
0.116279
43
1
43
43
0.947368
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
914e14686cb97fb685155cba7d062eec566eedec
183
py
Python
api/category/admin.py
Egor4ik325/rankrise
c4377237c9afbdda365c01453f73151189129aa9
[ "MIT" ]
null
null
null
api/category/admin.py
Egor4ik325/rankrise
c4377237c9afbdda365c01453f73151189129aa9
[ "MIT" ]
null
null
null
api/category/admin.py
Egor4ik325/rankrise
c4377237c9afbdda365c01453f73151189129aa9
[ "MIT" ]
null
null
null
from django.contrib import admin from mptt.admin import MPTTModelAdmin from category.models import Category @admin.register(Category) class CategoryAdmin(MPTTModelAdmin): pass
18.3
37
0.819672
22
183
6.818182
0.590909
0
0
0
0
0
0
0
0
0
0
0
0.125683
183
9
38
20.333333
0.9375
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.166667
0.5
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
e6927d9cfc0d9472d7f4d3dc0e626a429ffa44fd
66
py
Python
lib/data/__init__.py
vinbigdata-medical/endocv2020-seg
91675391911a3d70a09c51edb0eeb73b1081b037
[ "Apache-2.0" ]
6
2021-02-13T18:41:59.000Z
2021-06-01T09:29:06.000Z
lib/data/__init__.py
VinBDI-MedicalImagingTeam/endocv2020-seg
91675391911a3d70a09c51edb0eeb73b1081b037
[ "Apache-2.0" ]
1
2020-11-24T03:25:21.000Z
2020-11-24T03:25:21.000Z
lib/data/__init__.py
vinbigdata-medical/endocv2020-seg
91675391911a3d70a09c51edb0eeb73b1081b037
[ "Apache-2.0" ]
1
2022-03-18T10:28:19.000Z
2022-03-18T10:28:19.000Z
from .cutmix_mixup import cutmix_data, mixup_data, mixup_criterion
66
66
0.878788
10
66
5.4
0.6
0.333333
0
0
0
0
0
0
0
0
0
0
0.075758
66
1
66
66
0.885246
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
e69fbbb35c232fe7c9cdab4d65f24b7d1dbd854e
16,792
py
Python
tests/integration/test_results_metadata.py
amlight/pathfinder
a2f219e15a2ab4a043723bfa4a5453796d4ba0bf
[ "MIT" ]
null
null
null
tests/integration/test_results_metadata.py
amlight/pathfinder
a2f219e15a2ab4a043723bfa4a5453796d4ba0bf
[ "MIT" ]
null
null
null
tests/integration/test_results_metadata.py
amlight/pathfinder
a2f219e15a2ab4a043723bfa4a5453796d4ba0bf
[ "MIT" ]
null
null
null
"""Module to test the KytosGraph in graph.py.""" # module under test from tests.integration.metadata_settings import MetadataSettings class TestResultsMetadata(MetadataSettings): """Tests for the graph class. Tests if the metadata in search result edges have passing values. """ def test_path_constrained_user_user(self): """Test to see if there is a constrained path between User - User.""" self.initializer() result = self.get_path_constrained("User1", "User2") self.assertNotEqual(result, [], True) def test_path_constrained_user_switch(self): """Test to see if there is a constrained path between User - Switch.""" self.initializer() result = self.get_path_constrained("User1", "S4") self.assertNotEqual(result, [], True) def test_path_constrained_switch_switch(self): """Test to see if there is a constrained path between Switch - Switch.""" self.initializer() result = self.get_path_constrained("S2", "S4") self.assertNotEqual(result, [], True) def test_no_path_constrained_user_user(self): """Test to see if there is NOT a constrained path between User - User.""" self.initializer() result = self.get_path_constrained("User1", "User3") self.assertEqual(result, [], True) def test_path_constrained_user_user_t1(self): """Test to see if there is a constrained path between User - User using the 2nd topology variant.""" self.initializer(val=1) result = self.get_path_constrained("User1", "User3") self.assertNotEqual(result, [], True) def test_no_path_constrained_user_user_t1(self): """Test to see if there is NOT a constrained path between User - User using the 2nd topology variant.""" self.initializer(val=1) result = self.get_path_constrained("User1", "User2") self.assertEqual(result, [], True) def test_no_path_constrained_switch_switch_t1(self): """Test to see if there is NOT a constrained path between Switch - Switch using the 2nd topology variant.""" self.initializer(val=1) result = self.get_path_constrained("S1", "S2") self.assertEqual(result, [], True) def 
test_path_constrained_user_user_t2(self): """Test to see if there is a constrained path between User - User using the 3rd topology variant.""" self.initializer(val=2) result = self.get_path_constrained("User1", "User2") self.assertNotEqual(result, [], True) def test_path_constrained_user_switch_t2(self): """Test to see if there is a constrained path between User - Switch using the 3rd topology variant.""" self.initializer(val=2) result = self.get_path_constrained("User1", "S4") self.assertNotEqual(result, [], True) def test_path_constrained_switch_switch_t2(self): """Test to see if there is a constrained path between two switches using the 3rd topology variant.""" self.initializer(val=2) result = self.get_path_constrained("S2", "S4") self.assertNotEqual(result, [], True) def test_path_constrained_reliability(self): """Tests to see if the edges used in the paths of the result set do not have poor reliability """ requirements = {"reliability": 3} self.initializer() result = self.get_path_constrained("User1", "User2", base=requirements) self.assertNotEqual(result, []) def test_no_path_constrained_reliability(self): """Tests to see if the edges used in the paths of the result set do not have poor reliability """ requirements = {"reliability": 3} self.initializer() result = self.get_path_constrained("User1", "User3", base=requirements) self.assertEqual(result, []) def test_path_constrained_reliability_detailed(self): """Tests to see if the edges used in the paths of the result set do not have poor reliability """ reliabilities = [] requirements = {"reliability": 3} poor_reliability = 1 self.initializer() result = self.get_path_constrained("User1", "User2", base=requirements) if result: for path in result[0]["paths"]: for i in range(1, len(path)): endpoint_a = path[i - 1] endpoint_b = path[i] meta_data = self.graph.get_link_metadata( endpoint_a, endpoint_b) if meta_data and "reliability" in meta_data.keys(): reliabilities.append(meta_data["reliability"]) 
self.assertNotIn(poor_reliability, reliabilities) else: self.assertNotEqual(result, []) def test_path_constrained_delay(self): """Tests to see if the edges used in the paths from User 1 to User 2 have less than 30 delay. """ delays = [] requirements = {"delay": 29} self.initializer() result = self.get_path_constrained("User1", "User2", base=requirements) if result: for path in result[0]["paths"]: for i in range(1, len(path)): endpoint_a = path[i - 1] endpoint_b = path[i] meta_data = self.graph.get_link_metadata( endpoint_a, endpoint_b) if meta_data and "delay" in meta_data.keys(): delays.append(meta_data["delay"]) if not delays: self.assertNotEqual(delays, []) valid = True for delay in delays: if delay > requirements["delay"]: valid = False break self.assertEqual(valid, True) def bandwidth_list_builder(self, bandwidths, result): """Method to set up bandwidth metadata""" for path in result[0]["paths"]: for i in range(1, len(path)): endpoint_a = path[i - 1] endpoint_b = path[i] meta_data = self.graph.get_link_metadata( endpoint_a, endpoint_b) if meta_data and "bandwidth" in meta_data.keys(): bandwidths.append(meta_data["bandwidth"]) def test_path_constrained_bandwidth_detailed(self): """Tests to see if the edges used in the paths from User 1 to User 2 have at least 20 bandwidth. """ bandwidths = [] requirements = {"bandwidth": 20} self.initializer() result = self.get_path_constrained("User1", "User2", base=requirements) if result: self.bandwidth_list_builder(bandwidths, result) valid = True for bandwidth in bandwidths: if bandwidth < requirements["bandwidth"]: valid = False break self.assertEqual(valid, True) def test_path_constrained_bandwidth_detailed_t2(self): """Tests to see if the edges used in the paths from User 1 to User 2 have at least 20 bandwidth. 
""" bandwidths = [] requirements = {"bandwidth": 20} self.initializer(val=2) result = self.get_path_constrained("User1", "User2", base=requirements) if result: self.bandwidth_list_builder(bandwidths, result) for bandwidth in bandwidths: self.assertEqual(bandwidth < requirements["bandwidth"], False) def test_path_constrained_bandwidth_delay(self): """Tests to see if the edges used in the paths from User 1 to User 2 have at least 20 bandwidth and under 30 delay. """ bandwidths = [] delays = [] requirements = {"bandwidth": 20, "delay": 29} self.initializer() result = self.get_path_constrained("User1", "User2", base=requirements) if result: for path in result[0]["paths"]: for i in range(1, len(path)): endpoint_a = path[i - 1] endpoint_b = path[i] meta_data = self.graph.get_link_metadata( endpoint_a, endpoint_b) if meta_data and "bandwidth" in meta_data.keys(): bandwidths.append(meta_data["bandwidth"]) elif meta_data and "delay" in meta_data.keys(): delays.append(meta_data["delay"]) for bandwidth in bandwidths: self.assertEqual(bandwidth < requirements["bandwidth"], False) for delay in delays: self.assertEqual(delay > requirements["delay"], False) # @staticmethod # def generate_topology(): # """Generates a predetermined topology.""" # switches = {} # interfaces = {} # links = {} # # TestResultsMetadata.setting_switches_interfaces(interfaces, switches) # # TestResultsMetadata.setting_links(interfaces, links) # # TestResultsMetadata.adding_metadata(links) # # return switches, links # @staticmethod # def setting_switches_interfaces(interfaces, switches): # """Generates the switches in a a predetermined topology.""" # TestResults.create_switch("User1", switches) # TestResults.add_interfaces(3, switches["User1"], interfaces) # # TestResults.create_switch("S2", switches) # TestResults.add_interfaces(2, switches["S2"], interfaces) # # TestResults.create_switch("User2", switches) # TestResults.add_interfaces(3, switches["User2"], interfaces) # # TestResults.create_switch("S4", 
switches) # TestResults.add_interfaces(4, switches["S4"], interfaces) # # TestResults.create_switch("S5", switches) # TestResults.add_interfaces(2, switches["S5"], interfaces) # # @staticmethod # def setting_links(interfaces, links): # """Generates the links in a a predetermined topology.""" # TestResults.create_link("User1:1", "S2:1", interfaces, links) # # TestResults.create_link("User1:2", "S5:1", interfaces, links) # # TestResults.create_link("User1:3", "S4:1", interfaces, links) # # TestResults.create_link("S2:2", "User2:1", interfaces, links) # # TestResults.create_link("User2:2", "S4:2", interfaces, links) # # TestResults.create_link("S5:2", "S4:3", interfaces, links) # # TestResults.create_link("User2:3", "S4:4", interfaces, links) # # @staticmethod # def adding_metadata(links): # """Add the links' metadata in a a predetermined topology.""" # TestResults.add_metadata_to_link( # "User1:1", "S2:1", { # "reliability": 3, "ownership": "B", "delay": 30, # "bandwidth": 20}, links) # # TestResults.add_metadata_to_link( # "User1:2", "S5:1", { # "reliability": 1, "ownership": "A", "delay": 5, # "bandwidth": 50}, links) # # TestResults.add_metadata_to_link( # "User1:3", "S4:1", { # "reliability": 3, "ownership": "A", "delay": 60, # "bandwidth": 10}, links) # # TestResults.add_metadata_to_link( # "S2:2", "User2:1", { # "reliability": 3, "ownership": "B", "delay": 30, # "bandwidth": 20}, links) # # TestResults.add_metadata_to_link( # "User2:2", "S4:2", { # "reliability": 3, "ownership": "B", "delay": 30, # "bandwidth": 10}, links) # # TestResults.add_metadata_to_link( # "S5:2", "S4:3", { # "reliability": 1, "ownership": "A", "delay": 10, # "bandwidth": 50}, links) # # TestResults.add_metadata_to_link( # "User2:3", "S4:4", { # "reliability": 3, "ownership": "A", "delay": 29, # "bandwidth": 20}, links) # # @staticmethod # def generate_topology_1(): # """Generates a predetermined topology # - 2nd Variant.""" # switches = {} # interfaces = {} # links = {} # # 
TestResultsMetadata.setting_switches_interfaces_1(interfaces, switches) # # TestResultsMetadata.setting_links_1(interfaces, links) # # TestResultsMetadata.adding_metadata_1(links) # # return switches, links # # @staticmethod # def setting_switches_interfaces_1(interfaces, switches): # """Generates the switches in a a predetermined topology # - 2nd variant.""" # TestResults.create_switch("User1", switches) # TestResults.add_interfaces(2, switches["User1"], interfaces) # # TestResults.create_switch("User2", switches) # TestResults.add_interfaces(2, switches["User2"], interfaces) # # TestResults.create_switch("User3", switches) # TestResults.add_interfaces(2, switches["User3"], interfaces) # # TestResults.create_switch("S1", switches) # TestResults.add_interfaces(1, switches["S1"], interfaces) # # TestResults.create_switch("S2", switches) # TestResults.add_interfaces(1, switches["S2"], interfaces) # # TestResults.create_switch("S3", switches) # TestResults.add_interfaces(2, switches["S3"], interfaces) # # @staticmethod # def setting_links_1(interfaces, links): # """Generates the links in a a predetermined topology # - 2nd Variant.""" # TestResults.create_link("User1:1", "S1:1", interfaces, links) # # TestResults.create_link("User1:2", "S3:1", interfaces, links) # # TestResults.create_link("User2:1", "S2:1", interfaces, links) # # TestResults.create_link("User3:1", "S3:2", interfaces, links) # # @staticmethod # def adding_metadata_1(links): # """Add the links' metadata in a a predetermined topology # - 2nd Variant.""" # TestResults.add_metadata_to_link( # "User1:1", "S1:1", { # "reliability": 3, "ownership": "B", "delay": 30, # "bandwidth": 20}, links) # # TestResults.add_metadata_to_link( # "User1:2", "S3:1", { # "reliability": 1, "ownership": "A", "delay": 5, # "bandwidth": 50}, links) # # TestResults.add_metadata_to_link( # "User2:1", "S2:1", { # "reliability": 3, "ownership": "A", "delay": 60, # "bandwidth": 10}, links) # # TestResults.add_metadata_to_link( # 
"User3:1", "S3:2", { # "reliability": 3, "ownership": "B", "delay": 30, # "bandwidth": 20}, links) # # @staticmethod # def generate_topology_2(): # """Generates a predetermined topology # - 3rd Variant.""" # switches = {} # interfaces = {} # links = {} # # TestResultsMetadata.setting_switches_interfaces(interfaces, switches) # # TestResultsMetadata.setting_links(interfaces, links) # # TestResultsMetadata.adding_metadata_2(links) # # return switches, links # # @staticmethod # def adding_metadata_2(links): # """Add the links' metadata in a a predetermined topology # - 3rd Variant.""" # TestResults.add_metadata_to_link( # "User1:1", "S2:1", { # "reliability": 3, "ownership": "B", # "bandwidth": 20}, links) # # TestResults.add_metadata_to_link( # "User1:2", "S5:1", { # "reliability": 1, "delay": 5, # "bandwidth": 50}, links) # # TestResults.add_metadata_to_link( # "User1:3", "S4:1", { # "ownership": "A", "delay": 60, # "bandwidth": 10}, links) # # TestResults.add_metadata_to_link( # "S2:2", "User2:1", { # "reliability": 3, # "bandwidth": 20}, links) # # TestResults.add_metadata_to_link( # "User2:2", "S4:2", { # "ownership": "B", # "bandwidth": 10}, links) # # TestResults.add_metadata_to_link( # "S5:2", "S4:3", { # "delay": 10, # "bandwidth": 50}, links) # # TestResults.add_metadata_to_link( # "User2:3", "S4:4", { # "bandwidth": 20}, links)
36.425163
81
0.570391
1,777
16,792
5.230726
0.071469
0.054868
0.042604
0.046477
0.88596
0.829263
0.756966
0.729532
0.672727
0.617214
0
0.029103
0.308361
16,792
460
82
36.504348
0.771224
0.498511
0
0.736111
0
0
0.044442
0
0
0
0
0
0.138889
1
0.125
false
0
0.006944
0
0.138889
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e6ac55840ac84712c644cee3cb25133e4c2b050b
35
py
Python
emailthreads/__init__.py
emersion/python-emailthreads
99f1a04fa0dd2ce8a9c870016b067bf56f3d3bfd
[ "MIT" ]
34
2018-06-14T09:30:29.000Z
2022-03-13T20:56:06.000Z
emailthreads/__init__.py
emersion/python-emailreview
99f1a04fa0dd2ce8a9c870016b067bf56f3d3bfd
[ "MIT" ]
5
2019-12-01T20:20:51.000Z
2022-01-26T08:42:37.000Z
emailthreads/__init__.py
emersion/python-emailreview
99f1a04fa0dd2ce8a9c870016b067bf56f3d3bfd
[ "MIT" ]
3
2019-06-21T20:31:42.000Z
2021-12-18T00:09:07.000Z
from .threads import Thread, parse
17.5
34
0.8
5
35
5.6
1
0
0
0
0
0
0
0
0
0
0
0
0.142857
35
1
35
35
0.933333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
e6b6e7254c3a0d1bc1a29baa8ca30b8a1aed1436
112
py
Python
chat/views.py
aliPMPAINT/GoW
4b2e4a2a02f6cc51e741a0e0f1ce561e3904dfbb
[ "MIT" ]
4
2020-10-21T17:53:54.000Z
2020-11-05T16:01:26.000Z
chat/views.py
aliPMPAINT/GoW
4b2e4a2a02f6cc51e741a0e0f1ce561e3904dfbb
[ "MIT" ]
null
null
null
chat/views.py
aliPMPAINT/GoW
4b2e4a2a02f6cc51e741a0e0f1ce561e3904dfbb
[ "MIT" ]
null
null
null
from django.shortcuts import render def chatbot(request): return render(request, 'chat/chatbot.html')
18.666667
47
0.732143
14
112
5.857143
0.785714
0
0
0
0
0
0
0
0
0
0
0
0.169643
112
5
48
22.4
0.88172
0
0
0
0
0
0.151786
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
e6f9c0f591964e6616110951ea0952f61f70d758
107
py
Python
terrascript/cloudflare/__init__.py
hugovk/python-terrascript
08fe185904a70246822f5cfbdc9e64e9769ec494
[ "BSD-2-Clause" ]
4
2022-02-07T21:08:14.000Z
2022-03-03T04:41:28.000Z
terrascript/cloudflare/__init__.py
hugovk/python-terrascript
08fe185904a70246822f5cfbdc9e64e9769ec494
[ "BSD-2-Clause" ]
null
null
null
terrascript/cloudflare/__init__.py
hugovk/python-terrascript
08fe185904a70246822f5cfbdc9e64e9769ec494
[ "BSD-2-Clause" ]
2
2022-02-06T01:49:42.000Z
2022-02-08T14:15:00.000Z
# terrascript/cloudflare/__init__.py import terrascript class cloudflare(terrascript.Provider): pass
15.285714
39
0.803738
11
107
7.454545
0.727273
0
0
0
0
0
0
0
0
0
0
0
0.121495
107
6
40
17.833333
0.87234
0.317757
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
5da398062452e7f8afe9bf6b8267f91a265a9e42
36
py
Python
mrss-maker/generatemrss.py
riordan/brightsign-brown-shows
723708db9d73738ce6ae1b1c7f974892bfe0a45d
[ "Apache-2.0" ]
2
2017-11-14T12:04:08.000Z
2019-09-25T01:04:28.000Z
mrss-maker/generatemrss.py
riordan/brightsign-brown-shows
723708db9d73738ce6ae1b1c7f974892bfe0a45d
[ "Apache-2.0" ]
2
2017-02-13T18:49:27.000Z
2017-02-13T18:49:45.000Z
mrss-maker/generatemrss.py
riordan/brightsign-brown-shows
723708db9d73738ce6ae1b1c7f974892bfe0a45d
[ "Apache-2.0" ]
null
null
null
import xml.etree.cElementTree as ET
18
35
0.833333
6
36
5
1
0
0
0
0
0
0
0
0
0
0
0
0.111111
36
1
36
36
0.9375
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
5dad55f6d1a1d44720bcfcf9fe18d92d1225499a
32
py
Python
YouTubeWrapped.py
PradaGoose/google-youtube-history-analytics
287cb0f10005c5be8c8e617a036148dce95f0245
[ "MIT" ]
null
null
null
YouTubeWrapped.py
PradaGoose/google-youtube-history-analytics
287cb0f10005c5be8c8e617a036148dce95f0245
[ "MIT" ]
null
null
null
YouTubeWrapped.py
PradaGoose/google-youtube-history-analytics
287cb0f10005c5be8c8e617a036148dce95f0245
[ "MIT" ]
null
null
null
print("This is a test code")
6.4
28
0.625
6
32
3.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.25
32
4
29
8
0.833333
0
0
0
0
0
0.655172
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
f8ed45e4dd540e0ae23f0ccff66b5f226b0eeb66
223
py
Python
src/masonite/controllers/ViewController.py
josephmancuso/masonite
e9ed31331268bd4966609fbc1e5c55afa5cb0a79
[ "MIT" ]
35
2018-01-08T01:20:16.000Z
2018-02-06T02:37:14.000Z
src/masonite/controllers/ViewController.py
josephmancuso/masonite
e9ed31331268bd4966609fbc1e5c55afa5cb0a79
[ "MIT" ]
55
2018-01-03T02:42:03.000Z
2018-02-06T13:35:54.000Z
src/masonite/controllers/ViewController.py
josephmancuso/masonite
e9ed31331268bd4966609fbc1e5c55afa5cb0a79
[ "MIT" ]
4
2018-01-08T13:13:14.000Z
2018-01-12T19:35:32.000Z
from ..facades import View class ViewController: def __init__(self, template, data): self.template = template self.data = data def show(self): return View.render(self.template, self.data)
20.272727
52
0.654709
27
223
5.259259
0.518519
0.253521
0.225352
0
0
0
0
0
0
0
0
0
0.251121
223
10
53
22.3
0.850299
0
0
0
0
0
0
0
0
0
0
0
0
1
0.285714
false
0
0.142857
0.142857
0.714286
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
5d13b42fa80875f1745baafa3ddd6a92242470f8
26
py
Python
trump/extensions/source/tx-pydatacsv/__init__.py
Equitable/trump
a2802692bc642fa32096374159eea7ceca2947b4
[ "BSD-3-Clause" ]
8
2015-03-14T13:09:46.000Z
2020-08-29T17:49:52.000Z
trump/extensions/source/tx-pydatacsv/__init__.py
Equitable/trump
a2802692bc642fa32096374159eea7ceca2947b4
[ "BSD-3-Clause" ]
64
2015-03-14T12:14:17.000Z
2015-08-15T12:31:42.000Z
trump/extensions/source/tx-pydatacsv/__init__.py
Equitable/trump
a2802692bc642fa32096374159eea7ceca2947b4
[ "BSD-3-Clause" ]
10
2015-03-14T12:18:02.000Z
2022-01-18T21:44:27.000Z
from pydatacsvext import *
26
26
0.846154
3
26
7.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.115385
26
1
26
26
0.956522
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
5d34bdf8c26401b8c2b4f150fcd4e3c91e58abee
368
py
Python
rvk_simulator/rvk-simulator/src/controlvalve.py
N5GEH/n5geh.services.rvk_simulator
4a2c2882ddfedd7fcb260be3a86b9088f29b17c6
[ "MIT" ]
null
null
null
rvk_simulator/rvk-simulator/src/controlvalve.py
N5GEH/n5geh.services.rvk_simulator
4a2c2882ddfedd7fcb260be3a86b9088f29b17c6
[ "MIT" ]
null
null
null
rvk_simulator/rvk-simulator/src/controlvalve.py
N5GEH/n5geh.services.rvk_simulator
4a2c2882ddfedd7fcb260be3a86b9088f29b17c6
[ "MIT" ]
null
null
null
######################################################################## class ThreeWayControlValve: # constructor method with instance variables def __init__(self, hub_position): self.hub_position = hub_position def set_hub(self, new_position): self.hub_position = new_position def get_hub(self): return self.hub_position
24.533333
72
0.565217
36
368
5.416667
0.444444
0.282051
0.307692
0.235897
0
0
0
0
0
0
0
0
0.190217
368
14
73
26.285714
0.654362
0.11413
0
0
0
0
0
0
0
0
0
0
0
1
0.428571
false
0
0
0.142857
0.714286
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
5398d0ff02b7c164dfb4e6290dde5f20bce73bfa
23
py
Python
src/searchnets/utils/__init__.py
NickleDave/visual-search-nets
0d89e6eb00a97bd41cf037c18dc72b8759f7e012
[ "BSD-3-Clause" ]
5
2019-03-03T22:24:01.000Z
2021-05-17T15:30:02.000Z
src/searchnets/utils/__init__.py
NickleDave/visual-search-nets
0d89e6eb00a97bd41cf037c18dc72b8759f7e012
[ "BSD-3-Clause" ]
62
2019-02-04T18:55:26.000Z
2021-07-27T03:24:16.000Z
src/searchnets/utils/__init__.py
NickleDave/visual-search-nets
0d89e6eb00a97bd41cf037c18dc72b8759f7e012
[ "BSD-3-Clause" ]
null
null
null
from . import general
7.666667
21
0.73913
3
23
5.666667
1
0
0
0
0
0
0
0
0
0
0
0
0.217391
23
2
22
11.5
0.944444
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
54dda175a11fe565d42ffa12c040ebefcac5dad4
74,929
py
Python
lrs/tests/AuthTests.py
sponge-learning/ADL_LRS
0a7e8146bf72376802f84113792a4180c7a86d03
[ "Apache-2.0" ]
null
null
null
lrs/tests/AuthTests.py
sponge-learning/ADL_LRS
0a7e8146bf72376802f84113792a4180c7a86d03
[ "Apache-2.0" ]
null
null
null
lrs/tests/AuthTests.py
sponge-learning/ADL_LRS
0a7e8146bf72376802f84113792a4180c7a86d03
[ "Apache-2.0" ]
3
2021-01-14T12:51:24.000Z
2022-03-15T17:11:11.000Z
import json import base64 import uuid import urllib import hashlib from datetime import datetime, timedelta from django.test import TestCase from django.core.urlresolvers import reverse from django.utils.timezone import utc from django.conf import settings from ..models import Statement, Agent, Verb, Activity, SubStatement from ..views import register, statements from ..util import retrieve_statement class AuthTests(TestCase): # Want to test no auth, so have to disable both auths @classmethod def setUpClass(cls): print "\n%s" % __name__ def setUp(self): if not settings.ALLOW_EMPTY_HTTP_AUTH: settings.ALLOW_EMPTY_HTTP_AUTH = True if settings.OAUTH_ENABLED: settings.OAUTH_ENABLED = False self.auth = "Basic %s" % base64.b64encode("%s:%s" % ('','')) self.guid1 = str(uuid.uuid1()) self.guid2 = str(uuid.uuid1()) self.guid3 = str(uuid.uuid1()) self.guid4 = str(uuid.uuid1()) self.guid5 = str(uuid.uuid1()) self.guid6 = str(uuid.uuid1()) self.guid7 = str(uuid.uuid1()) self.guid8 = str(uuid.uuid1()) self.guid9 = str(uuid.uuid1()) self.guid10 = str(uuid.uuid1()) self.cguid1 = str(uuid.uuid1()) self.cguid2 = str(uuid.uuid1()) self.cguid3 = str(uuid.uuid1()) self.cguid4 = str(uuid.uuid1()) self.cguid5 = str(uuid.uuid1()) self.cguid6 = str(uuid.uuid1()) self.cguid7 = str(uuid.uuid1()) self.cguid8 = str(uuid.uuid1()) stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}}, "object": {"id":"act:activity"}, "actor":{"objectType":"Agent","mbox":"mailto:s@s.com"}}) exist_stmt_response = self.client.post(reverse(statements), stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(exist_stmt_response.status_code, 200) self.exist_stmt_id = json.loads(exist_stmt_response.content)[0] self.firstTime = str(datetime.utcnow().replace(tzinfo=utc).isoformat()) self.existStmt1 = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": 
{"en-US":"created"}},"actor":{"objectType":"Agent","mbox":"mailto:s@s.com"}, "object": {"objectType": "Activity", "id":"act:foogie", "definition": {"name": {"en-US":"testname2", "en-GB": "altname"}, "description": {"en-US":"testdesc2", "en-GB": "altdesc"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction", "interactionType": "fill-in","correctResponsesPattern": ["answer"], "extensions": {"ext:key1": "value1", "ext:key2": "value2","ext:key3": "value3"}}}, "result": {"score":{"scaled":.85}, "completion": True, "success": True, "response": "kicked", "duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:key1": "value1", "ext:key2":"value2"}}, "context":{"registration": self.cguid1, "contextActivities": {"other": {"id": "act:NewActivityID2"}}, "revision": "food", "platform":"bard","language": "en-US", "extensions":{"ext:ckey1": "cval1", "ext:ckey2": "cval2"}}}) self.existStmt2 = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}},"actor":{"objectType":"Agent","mbox":"mailto:s@t.com"}, "object": {"objectType": "Activity", "id":"act:foogie", "definition": {"name": {"en-US":"testname3", "en-GB": "altname"}, "description": {"en-US":"testdesc3","en-GB":"altdesc"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction", "interactionType": "fill-in","correctResponsesPattern": ["answers"], "extensions": {"ext:key11": "value11", "ext:key22": "value22","ext:key33": "value33"}}}, "result": {"score":{"scaled":.75}, "completion": True, "success": True, "response": "shouted", "duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:dkey1": "dvalue1", "ext:dkey2":"dvalue2"}}, "context":{"registration": self.cguid2, "contextActivities": {"other": {"id": "act:NewActivityID22"}}, "revision": "food", "platform":"bard","language": "en-US", "extensions":{"ext:ckey11": "cval11", "ext:ckey22": "cval22"}}}) self.existStmt3 = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": 
{"en-US":"created"}},"actor":{"objectType":"Agent","mbox":"mailto:s@s.com"}, "object": {"objectType": "Activity", "id":"act:act:foogals", "definition": {"name": {"en-US":"testname3"},"description": {"en-US":"testdesc3"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction", "interactionType": "fill-in","correctResponsesPattern": ["answers"], "extensions": {"ext:key111": "value111", "ext:key222": "value222","ext:key333": "value333"}}}, "result": {"score":{"scaled":.79}, "completion": True, "success": True, "response": "shouted", "duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:dkey1": "dvalue1", "ext:dkey2":"dvalue2"}}, "context":{"registration": self.cguid3, "contextActivities": {"other": {"id": "act:NewActivityID22"}}, "revision": "food", "platform":"bard","language": "en-US", "instructor":{"objectType": "Agent", "name":"bob", "mbox":"mailto:bob@bob.com"}, "extensions":{"ext:ckey111": "cval111","ext:ckey222": "cval222"}}}) self.existStmt4 = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}},"actor":{"objectType":"Agent","mbox":"mailto:s@s.com"}, "object": {"objectType": "Activity", "id":"act:foogal", "definition": {"name": {"en-US":"testname3"},"description": {"en-US":"testdesc3"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction", "interactionType": "fill-in","correctResponsesPattern": ["answers"], "extensions": {"ext:key111": "value111", "ext:key222": "value222","ext:key333": "value333"}}}, "result": {"score":{"scaled":.79}, "completion": True, "success": True, "response": "shouted", "duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:dkey1": "dvalue1", "ext:dkey2":"dvalue2"}}, "context":{"registration": self.cguid4, "contextActivities": {"other": {"id": "act:NewActivityID22"}}, "revision": "food", "platform":"bard","language": "en-US","instructor":{"name":"bill", "mbox":"mailto:bill@bill.com"}, "extensions":{"ext:ckey111": "cval111","ext:ckey222": "cval222"}}}) self.existStmt5 = 
json.dumps({"object":{"objectType":"Agent","name":"jon","mbox":"mailto:jon@jon.com"}, "verb":{"id": "http://adlnet.gov/expapi/verbs/created","display": {"en-US":"created"}}, "actor":{"objectType":"Agent","mbox":"mailto:s@s.com"}}) self.existStmt6 = json.dumps({"actor": {"objectType":"Agent","name":"max","mbox":"mailto:max@max.com"}, "object":{"id": "act:test_activity"},"verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}}}) self.existStmt7 = json.dumps({"object": {"objectType":"Agent","name":"max","mbox":"mailto:max@max.com"}, "verb": {"id": "http://adlnet.gov/expapi/verbs/created","display": {"en-US":"created"}}, "actor":{"objectType":"Agent","mbox":"mailto:s@s.com"}}) self.existStmt8 = json.dumps({"object": {"objectType":"Agent","name":"john","mbox":"mailto:john@john.com"}, "verb": {"id": "http://adlnet.gov/expapi/verbs/missed","display": {"en-US":"missed"}}, "actor":{"objectType":"Agent","mbox":"mailto:s@s.com"}}) self.existStmt9 = json.dumps({"actor":{"objectType":"Agent","mbox":"mailto:sub@sub.com"}, "verb":{"id": "http://adlnet.gov/expapi/verbs/missed"},"object":{"objectType":"SubStatement", "actor":{"objectType":"Agent","mbox":"mailto:ss@ss.com"},"verb": {"id":"nested:verb/url/nested"}, "object": {"objectType":"Activity", "id":"act:testex.com"}, "result":{"completion": True, "success": True, "response": "kicked"}, "context":{"registration": self.cguid6, "contextActivities": {"other": {"id": "act:NewActivityID"}},"revision": "foo", "platform":"bar", "language": "en-US", "extensions":{"ext:k1": "v1", "ext:k2": "v2"}}}}) self.existStmt10 = json.dumps({"actor":{"objectType":"Agent","mbox":"mailto:ref@ref.com"}, "verb":{"id": "http://adlnet.gov/expapi/verbs/missed"},"object":{"objectType":"StatementRef", "id":str(self.exist_stmt_id)}}) # Put statements param = {"statementId":self.guid1} path = "%s?%s" % (reverse(statements), urllib.urlencode(param)) stmt_payload = self.existStmt1 self.putresponse1 = self.client.put(path, 
stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(self.putresponse1.status_code, 204) time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=2)).replace(tzinfo=utc).isoformat())) stmt = Statement.objects.filter(statement_id=self.guid1).update(stored=time) param = {"statementId":self.guid3} path = "%s?%s" % (reverse(statements), urllib.urlencode(param)) stmt_payload = self.existStmt3 self.putresponse3 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(self.putresponse3.status_code, 204) time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=3)).replace(tzinfo=utc).isoformat())) stmt = Statement.objects.filter(statement_id=self.guid3).update(stored=time) param = {"statementId":self.guid4} path = "%s?%s" % (reverse(statements), urllib.urlencode(param)) stmt_payload = self.existStmt4 self.putresponse4 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(self.putresponse4.status_code, 204) time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=4)).replace(tzinfo=utc).isoformat())) stmt = Statement.objects.filter(statement_id=self.guid4).update(stored=time) self.secondTime = str((datetime.utcnow()+timedelta(seconds=4)).replace(tzinfo=utc).isoformat()) param = {"statementId":self.guid2} path = "%s?%s" % (reverse(statements), urllib.urlencode(param)) stmt_payload = self.existStmt2 self.putresponse2 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(self.putresponse2.status_code, 204) time = 
retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=6)).replace(tzinfo=utc).isoformat())) stmt = Statement.objects.filter(statement_id=self.guid2).update(stored=time) param = {"statementId":self.guid5} path = "%s?%s" % (reverse(statements), urllib.urlencode(param)) stmt_payload = self.existStmt5 self.putresponse5 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(self.putresponse5.status_code, 204) time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=7)).replace(tzinfo=utc).isoformat())) stmt = Statement.objects.filter(statement_id=self.guid5).update(stored=time) param = {"statementId":self.guid6} path = "%s?%s" % (reverse(statements), urllib.urlencode(param)) stmt_payload = self.existStmt6 self.putresponse6 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(self.putresponse6.status_code, 204) time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=8)).replace(tzinfo=utc).isoformat())) stmt = Statement.objects.filter(statement_id=self.guid6).update(stored=time) param = {"statementId":self.guid7} path = "%s?%s" % (reverse(statements), urllib.urlencode(param)) stmt_payload = self.existStmt7 self.putresponse7 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(self.putresponse7.status_code, 204) time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=9)).replace(tzinfo=utc).isoformat())) stmt = Statement.objects.filter(statement_id=self.guid7).update(stored=time) param = {"statementId":self.guid8} path = "%s?%s" % (reverse(statements), urllib.urlencode(param)) stmt_payload = self.existStmt8 self.putresponse8 = self.client.put(path, 
stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(self.putresponse8.status_code, 204) time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=10)).replace(tzinfo=utc).isoformat())) stmt = Statement.objects.filter(statement_id=self.guid8).update(stored=time) param = {"statementId": self.guid9} path = "%s?%s" % (reverse(statements), urllib.urlencode(param)) stmt_payload = self.existStmt9 self.putresponse9 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(self.putresponse9.status_code, 204) time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=11)).replace(tzinfo=utc).isoformat())) stmt = Statement.objects.filter(statement_id=self.guid9).update(stored=time) param = {"statementId": self.guid10} path = "%s?%s" % (reverse(statements), urllib.urlencode(param)) stmt_payload = self.existStmt10 self.putresponse10 = self.client.put(path, stmt_payload, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(self.putresponse10.status_code, 204) time = retrieve_statement.convert_to_utc(str((datetime.utcnow()+timedelta(seconds=11)).replace(tzinfo=utc).isoformat())) stmt = Statement.objects.filter(statement_id=self.guid10).update(stored=time) def tearDown(self): if settings.ALLOW_EMPTY_HTTP_AUTH: settings.ALLOW_EMPTY_HTTP_AUTH = False if not settings.OAUTH_ENABLED: settings.OAUTH_ENABLED = True def test_post_with_no_valid_params(self): # Error will be thrown in statements class resp = self.client.post(reverse(statements), {"feet":"yes","hands": {"id":"http://example.com/test_post"}}, Authorization=self.auth, content_type="application/json", X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(resp.status_code, 400) def test_post(self): stmt = 
json.dumps({"actor":{"objectType": "Agent", "mbox":"mailto:t@t.com", "name":"bob"}, "verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}}, "object": {"id":"act:test_post"}}) response = self.client.post(reverse(statements), stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response.status_code, 200) act = Activity.objects.get(activity_id="act:test_post") self.assertEqual(act.activity_id, "act:test_post") agent = Agent.objects.get(mbox="mailto:t@t.com") self.assertEqual(agent.name, "bob") def test_post_stmt_ref_no_existing_stmt(self): stmt = json.dumps({"actor":{"objectType":"Agent","mbox":"mailto:ref@ref.com"}, "verb":{"id": "http://adlnet.gov/expapi/verbs/missed"},"object":{"objectType":"StatementRef", "id":"12345678-1234-5678-1234-567812345678"}}) response = self.client.post(reverse(statements), stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response.status_code, 404) def test_post_with_actor(self): stmt = json.dumps({"actor":{"mbox":"mailto:mr.t@example.com"}, "verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}}, "object": {"id":"act:i.pity.the.fool"}}) response = self.client.post(reverse(statements), stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response.status_code, 200) Agent.objects.get(mbox="mailto:mr.t@example.com") def test_list_post(self): stmts = json.dumps([{"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}}, "object": {"id":"act:test_list_post"}, "actor":{"objectType":"Agent", "mbox":"mailto:t@t.com"}}, {"verb":{"id": "http://adlnet.gov/expapi/verbs/failed","display": {"en-GB":"failed"}}, "object": {"id":"act:test_list_post1"}, "actor":{"objectType":"Agent", "mbox":"mailto:t@t.com"}}]) response = 
self.client.post(reverse(statements), stmts, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response.status_code, 200) activity1 = Activity.objects.get(activity_id="act:test_list_post") activity2 = Activity.objects.get(activity_id="act:test_list_post1") stmt1 = Statement.objects.get(object_activity=activity1) stmt2 = Statement.objects.get(object_activity=activity2) verb1 = Verb.objects.get(id=stmt1.verb.id) verb2 = Verb.objects.get(id=stmt2.verb.id) lang_map1 = verb1.display lang_map2 = verb2.display self.assertEqual(response.status_code, 200) self.assertEqual(stmt1.verb.verb_id, "http://adlnet.gov/expapi/verbs/passed") self.assertEqual(stmt2.verb.verb_id, "http://adlnet.gov/expapi/verbs/failed") self.assertEqual(lang_map1.keys()[0], "en-US") self.assertEqual(lang_map1.values()[0], "passed") self.assertEqual(lang_map2.keys()[0], "en-GB") self.assertEqual(lang_map2.values()[0], "failed") def test_put(self): guid = str(uuid.uuid1()) param = {"statementId":guid} path = "%s?%s" % (reverse(statements), urllib.urlencode(param)) stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}}, "object": {"id":"act:test_put"},"actor":{"objectType":"Agent", "mbox":"mailto:t@t.com"}}) putResponse = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(putResponse.status_code, 204) stmt = Statement.objects.get(statement_id=guid) act = Activity.objects.get(activity_id="act:test_put") self.assertEqual(act.activity_id, "act:test_put") self.assertEqual(stmt.actor.mbox, "mailto:t@t.com") self.assertEqual(stmt.verb.verb_id, "http://adlnet.gov/expapi/verbs/passed") def test_put_with_substatement(self): con_guid = str(uuid.uuid1()) st_guid = str(uuid.uuid1()) param = {"statementId": st_guid} path = "%s?%s" % (reverse(statements), urllib.urlencode(param)) stmt = 
json.dumps({"actor":{"objectType":"Agent","mbox":"mailto:sass@sass.com"}, "verb": {"id":"verb:verb/url/tested"}, "object":{"objectType":"SubStatement", "actor":{"objectType":"Agent","mbox":"mailto:ss@ss.com"},"verb": {"id":"verb:verb/url/nested"}, "object": {"objectType":"Activity", "id":"act:testex.com"}, "result":{"completion": True, "success": True, "response": "kicked"}, "context":{"registration": con_guid, "contextActivities": {"other": {"id": "act:NewActivityID"}},"revision": "foo", "platform":"bar", "language": "en-US", "extensions":{"ext:k1": "v1", "ext:k2": "v2"}}}}) response = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response.status_code, 204) path = "%s?%s" % (reverse(statements), urllib.urlencode(param)) get_response = self.client.get(path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth) self.assertEqual(get_response.status_code, 200) rsp = get_response.content self.assertIn("objectType",rsp) self.assertIn("SubStatement", rsp) self.assertIn("actor",rsp) self.assertIn("mailto:ss@ss.com",rsp) self.assertIn("verb",rsp) self.assertIn("verb:verb/url/nested", rsp) self.assertIn("Activity", rsp) self.assertIn("act:testex.com", rsp) self.assertIn("result", rsp) self.assertIn("completion",rsp) self.assertIn("success", rsp) self.assertIn("response", rsp) self.assertIn("kicked", rsp) self.assertIn("context", rsp) self.assertIn(con_guid, rsp) self.assertIn("contextActivities", rsp) self.assertIn("other", rsp) self.assertIn("revision", rsp) self.assertIn("foo", rsp) self.assertIn("platform", rsp) self.assertIn("bar", rsp) self.assertIn("language", rsp) self.assertIn("en-US", rsp) self.assertIn("extensions", rsp) self.assertIn("ext:k1", rsp) self.assertIn("v1", rsp) self.assertIn("ext:k2", rsp) self.assertIn("v2", rsp) def test_no_content_put(self): guid = str(uuid.uuid1()) param = {"statementId":guid} path = "%s?%s" % 
(reverse(statements), urllib.urlencode(param)) stmt = json.dumps({}) putResponse = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(putResponse.status_code, 400) def test_existing_stmtID_put_put(self): guid = str(uuid.uuid1()) param = {"statementId":guid} path = "%s?%s" % (reverse(statements), urllib.urlencode(param)) exist_stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}}, "object": {"id":"act:activity"},"actor":{"objectType":"Agent", "mbox":"mailto:t@t.com"}}) first_put = self.client.put(path, exist_stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(first_put.status_code, 204) stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}}, "object":{"id":"act:test_existing_put"}, "actor":{"objectType":"Agent", "mbox":"mailto:t@t.com"}}) putResponse = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(putResponse.status_code, 409) def test_existing_stmtID_put_post(self): guid = str(uuid.uuid1()) exist_stmt = json.dumps({"id": guid, "verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}}, "object": {"id":"act:activity"},"actor":{"objectType":"Agent", "mbox":"mailto:t@t.com"}}) post = self.client.post(reverse(statements), exist_stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(post.status_code, 200) param = {"statementId":guid} path = "%s?%s" % (reverse(statements), urllib.urlencode(param)) stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}}, "object":{"id":"act:test_existing_put"}, "actor":{"objectType":"Agent", "mbox":"mailto:t@t.com"}}) 
putResponse = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(putResponse.status_code, 409) def test_missing_stmtID_put(self): stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}}, "object": {"id":"act:act:test_put"},"actor":{"objectType":"Agent", "mbox":"mailto:t@t.com"}}) response = self.client.put(reverse(statements), stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response.status_code, 400) self.assertIn(response.content, "Error -- statements - method = PUT, but no statementId parameter or ID given in statement") def test_get(self): param = {"statementId":self.guid1} path = "%s?%s" % (reverse(statements), urllib.urlencode(param)) getResponse = self.client.get(path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth) self.assertEqual(getResponse.status_code, 200) rsp = getResponse.content self.assertIn(self.guid1, rsp) def test_get_no_existing_ID(self): param = {"statementId":"aaaaaa"} path = "%s?%s" % (reverse(statements), urllib.urlencode(param)) getResponse = self.client.get(path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth) self.assertEqual(getResponse.status_code, 404) def test_get_no_statementid(self): getResponse = self.client.get(reverse(statements), X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth) self.assertEqual(getResponse.status_code, 200) jsn = json.loads(getResponse.content) self.assertEqual(len(jsn["statements"]), 11) # Sever activities are PUT-contextActivites create 3 more def test_number_of_activities(self): acts = len(Activity.objects.all()) self.assertEqual(9, acts) def test_update_activity_correct_auth(self): stmt = json.dumps({"verb": {"id":"verb:verb/url/changed-act"},"actor":{"objectType":"Agent", "mbox":"mailto:l@l.com"}, "object": 
{"objectType": "Activity", "id":"act:foogie", "definition": {"name": {"en-US":"testname3"},"description": {"en-US":"testdesc3"}, "type": "http://adlnet.gov/expapi/activities/cmi.interaction","interactionType": "fill-in","correctResponsesPattern": ["answer"], "extensions": {"ext:key1": "value1", "ext:key2": "value2","ext:key3": "value3"}}}, "result": {"score":{"scaled":.85}, "completion": True, "success": True, "response": "kicked", "duration": "P3Y6M4DT12H30M5S", "extensions":{"ext:key1": "value1", "ext:key2":"value2"}}, "context":{"registration": self.cguid8, "contextActivities": {"other": {"id": "act:NewActivityID2"}}, "revision": "food", "platform":"bard","language": "en-US", "extensions":{"ext:ckey1": "cval1", "ext:ckey2": "cval2"}}}) post_response = self.client.post(reverse(statements), stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(post_response.status_code, 200) act = Activity.objects.get(activity_id="act:foogie") name_set = act.activity_definition_name desc_set = act.activity_definition_description self.assertEqual(name_set.keys()[1], "en-US") self.assertEqual(name_set.values()[1], "testname3") self.assertEqual(name_set.keys()[0], "en-GB") self.assertEqual(name_set.values()[0], "altname") self.assertEqual(desc_set.keys()[1], "en-US") self.assertEqual(desc_set.values()[1], "testdesc3") self.assertEqual(desc_set.keys()[0], "en-GB") self.assertEqual(desc_set.values()[0], "altdesc") def test_cors_post_put(self): st_id = str(uuid.uuid1()) content = {"verb":{"id":"verb:verb/url"}, "actor":{"objectType":"Agent", "mbox": "mailto:r@r.com"}, "object": {"id":"act:test_cors_post_put"}} bdy = "statementId=%s&content=%s&Content-Type=application/json&X-Experience-API-Version=1.0.0" % (st_id, content) path = "%s?%s" % (reverse(statements), urllib.urlencode({"method":"PUT"})) response = self.client.post(path, bdy, content_type="application/x-www-form-urlencoded", Authorization=self.auth) 
self.assertEqual(response.status_code, 204) act = Activity.objects.get(activity_id="act:test_cors_post_put") self.assertEqual(act.activity_id, "act:test_cors_post_put") def test_issue_put(self): stmt_id = "33f60b35-e1b2-4ddc-9c6f-7b3f65244430" stmt = json.dumps({"verb":{"id":"verb:verb/iri"},"object":{"id":"act:scorm.com/JsTetris_TCAPI","definition":{"type":"type:media", "name":{"en-US":"Js Tetris - Tin Can Prototype"},"description":{"en-US":"A game of tetris."}}}, "context":{"contextActivities":{"grouping":{"id":"act:scorm.com/JsTetris_TCAPI"}}, "registration":"6b1091be-2833-4886-b4a6-59e5e0b3c3f4"}, "actor":{"mbox":"mailto:tom.creighton.ctr@adlnet.gov","name":"Tom Creighton"}}) path = "%s?%s" % (reverse(statements), urllib.urlencode({"statementId":stmt_id})) put_stmt = self.client.put(path, stmt, content_type="application/json", X_Experience_API_Version=settings.XAPI_VERSION,Authorization=self.auth) self.assertEqual(put_stmt.status_code, 204) def test_post_with_group(self): ot = "Group" name = "the group ST" mbox = "mailto:the.groupST@example.com" stmt = json.dumps({"actor":{"objectType":ot, "name":name, "mbox":mbox,"member":[{"name":"agentA","mbox":"mailto:agentA@example.com"}, {"name":"agentB","mbox":"mailto:agentB@example.com"}]},"verb":{"id": "http://verb/iri/created", "display":{"en-US":"created"}}, "object": {"id":"act:i.pity.the.fool"}}) response = self.client.post(reverse(statements), stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response.status_code, 200) g = Agent.objects.get(mbox="mailto:the.groupST@example.com") self.assertEquals(g.name, name) self.assertEquals(g.mbox, mbox) mems = g.member.values_list("name", flat=True) self.assertEquals(len(mems), 2) self.assertIn("agentA", mems) self.assertIn("agentB", mems) def test_issue_put_no_version_header(self): stmt_id = '33f60b35-e1b2-4ddc-9c6f-7b3f65244431' stmt = 
json.dumps({"verb":"verb:completed","object":{"id":"act:scorm.com/JsTetris_TCAPI/level2", "definition":{"type":"media","name":{"en-US":"Js Tetris Level2"}, "description":{"en-US":"Starting at 1, the higher the level, the harder the game."}}}, "result":{"extensions":{"ext:time":104,"ext:apm":229,"ext:lines":5},"score":{"raw":9911,"min":0}}, "context":{"contextActivities":{"grouping":{"id":"act:scorm.com/JsTetris_TCAPI"}}, "registration":"b7be7d9d-bfe2-4917-8ccd-41a0d18dd953"}, "actor":{"name":"tom creighton","mbox":"mailto:tom@example.com"}}) path = '%s?%s' % (reverse(statements), urllib.urlencode({"statementId":stmt_id})) put_stmt = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth) self.assertEqual(put_stmt.status_code, 400) def test_issue_put_wrong_version_header(self): stmt_id = '33f60b35-e1b2-4ddc-9c6f-7b3f65244432' stmt = json.dumps({"verb":"verb:completed","object":{"id":"act:scorm.com/JsTetris_TCAPI/level2", "definition":{"type":"media","name":{"en-US":"Js Tetris Level2"}, "description":{"en-US":"Starting at 1, the higher the level, the harder the game."}}}, "result":{"extensions":{"ext:time":104,"ext:apm":229,"ext:lines":5},"score":{"raw":9911,"min":0}}, "context":{"contextActivities":{"grouping":{"id":"act:scorm.com/JsTetris_TCAPI"}}, "registration":"b7be7d9d-bfe2-4917-8ccd-41a0d18dd953"}, "actor":{"name":"tom creighton","mbox":"mailto:tom@example.com"}}) path = '%s?%s' % (reverse(statements), urllib.urlencode({"statementId":stmt_id})) put_stmt = self.client.put(path, stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version="0.90") self.assertEqual(put_stmt.status_code, 400) # Use this test to make sure stmts are being returned correctly with all data - doesn't check timestamp and stored fields def test_all_fields_activity_as_object(self): nested_st_id = str(uuid.uuid1()) nest_param = {"statementId":nested_st_id} nest_path = "%s?%s" % (reverse(statements), urllib.urlencode(nest_param)) 
# --- continuation of the preceding test (its "def" line is above this chunk) ---
# PUT a simple statement that the main statement's context will reference via StatementRef.
nested_stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:tincan@adlnet.gov"},
    "verb": {"id": "http://adlnet.gov/expapi/verbs/assess", "display": {"en-US": "assessed"}},
    "object": {"id": "http://example.adlnet.gov/tincan/example/simplestatement"}})
put_sub_stmt = self.client.put(nest_path, nested_stmt, content_type="application/json",
    Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_sub_stmt.status_code, 204)

# Build a statement exercising every field (activity object with full interaction definition).
stmt_id = str(uuid.uuid1())
context_id = str(uuid.uuid1())
param = {"statementId": stmt_id}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt = json.dumps({"actor": {"objectType": "Agent", "name": "Lou Wolford",
        "account": {"homePage": "http://example.com", "name": "uniqueName"}},
    "verb": {"id": "http://adlnet.gov/expapi/verbs/created",
        "display": {"en-US": "created", "en-GB": "made"}},
    "object": {"objectType": "Activity", "id": "http:adlnet.gov/my/Activity/URL",
        "definition": {"name": {"en-US": "actName", "en-GB": "anotherActName"},
            "description": {"en-US": "This is my activity description.",
                "en-GB": "This is another activity description."},
            "type": "http://www.adlnet.gov/experienceapi/activity-types/http://adlnet.gov/expapi/activities/cmi.interaction",
            "interactionType": "choice",
            "correctResponsesPattern": ["golf", "tetris"],
            "choices": [{"id": "golf", "description": {"en-US": "Golf Example", "en-GB": "GOLF"}},
                {"id": "tetris", "description": {"en-US": "Tetris Example", "en-GB": "TETRIS"}},
                {"id": "facebook", "description": {"en-US": "Facebook App", "en-GB": "FACEBOOK"}},
                {"id": "scrabble", "description": {"en-US": "Scrabble Example", "en-GB": "SCRABBLE"}}],
            "extensions": {"ext:key1": "value1", "ext:key2": "value2", "ext:key3": "value3"}}},
    "result": {"score": {"scaled": .85, "raw": 85, "min": 0, "max": 100},
        "completion": True, "success": True, "response": "Well done",
        "duration": "P3Y6M4DT12H30M5S",
        "extensions": {"ext:resultKey1": "resultValue1", "ext:resultKey2": "resultValue2"}},
    "context": {"registration": context_id,
        "contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test"},
            "grouping": {"id": "http://groupingID"}},
        "revision": "Spelling error in choices.",
        "platform": "Platform is web browser.",
        "language": "en-US",
        "statement": {"objectType": "StatementRef", "id": str(nested_st_id)},
        "extensions": {"ext:contextKey1": "contextVal1", "ext:contextKey2": "contextVal2"}},
    "timestamp": self.firstTime})
put_stmt = self.client.put(path, stmt, content_type="application/json",
    Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(put_stmt.status_code, 204)

# Retrieve the statement and verify every submitted field round-trips.
get_response = self.client.get(path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
the_returned = json.loads(get_response.content)
self.assertEqual(the_returned['id'], stmt_id)
self.assertEqual(the_returned['actor']['objectType'], 'Agent')
self.assertEqual(the_returned['actor']['name'], 'Lou Wolford')
self.assertEqual(the_returned['actor']['account']['name'], 'uniqueName')
self.assertEqual(the_returned['actor']['account']['homePage'], 'http://example.com')
self.assertEqual(the_returned['verb']['id'], 'http://adlnet.gov/expapi/verbs/created')
self.assertEqual(the_returned['verb']['display']['en-GB'], 'made')
self.assertEqual(the_returned['verb']['display']['en-US'], 'created')
self.assertEqual(the_returned['result']['completion'], True)
self.assertEqual(the_returned['result']['duration'], 'P3Y6M4DT12H30M5S')
self.assertEqual(the_returned['result']['extensions']['ext:resultKey1'], 'resultValue1')
self.assertEqual(the_returned['result']['extensions']['ext:resultKey2'], 'resultValue2')
self.assertEqual(the_returned['result']['response'], 'Well done')
self.assertEqual(the_returned['result']['score']['max'], 100)
self.assertEqual(the_returned['result']['score']['min'], 0)
self.assertEqual(the_returned['result']['score']['raw'], 85)
self.assertEqual(the_returned['result']['score']['scaled'], 0.85)
self.assertEqual(the_returned['result']['success'], True)
self.assertEqual(the_returned['context']['contextActivities']['other'][0]['id'],
    'http://example.adlnet.gov/tincan/example/test')
self.assertEqual(the_returned['context']['extensions']['ext:contextKey1'], 'contextVal1')
self.assertEqual(the_returned['context']['extensions']['ext:contextKey2'], 'contextVal2')
self.assertEqual(the_returned['context']['language'], 'en-US')
self.assertEqual(the_returned['context']['platform'], 'Platform is web browser.')
self.assertEqual(the_returned['context']['registration'], context_id)
self.assertEqual(the_returned['context']['revision'], 'Spelling error in choices.')
self.assertEqual(the_returned['context']['statement']['id'], str(nested_st_id))
self.assertEqual(the_returned['context']['statement']['objectType'], 'StatementRef')

# Use this test to make sure stmts are being returned correctly with all data - doesn't check timestamp, stored fields
def test_all_fields_agent_as_object(self):
    # PUT a simple statement referenced by the main statement's context.
    nested_st_id = str(uuid.uuid1())
    nest_param = {"statementId": nested_st_id}
    nest_path = "%s?%s" % (reverse(statements), urllib.urlencode(nest_param))
    nested_stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:tincan@adlnet.gov"},
        "verb": {"id": "http://adlnet.gov/expapi/verbs/assess", "display": {"en-US": "assessed"}},
        "object": {"id": "http://example.adlnet.gov/tincan/example/simplestatement"}})
    put_sub_stmt = self.client.put(nest_path, nested_stmt, content_type="application/json",
        Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(put_sub_stmt.status_code, 204)

    # Main statement: the object is an Agent identified by mbox_sha1sum.
    stmt_id = str(uuid.uuid1())
    context_id = str(uuid.uuid1())
    param = {"statementId": stmt_id}
    path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
    msha = hashlib.sha1("tom@example.com").hexdigest()
    stmt = json.dumps({"actor": {"objectType": "Agent", "name": "Lou Wolford",
            "account": {"homePage": "http://example.com", "name": "louUniqueName"}},
        "verb": {"id": "http://adlnet.gov/expapi/verbs/helped",
            "display": {"en-US": "helped", "en-GB": "assisted"}},
        "object": {"objectType": "Agent", "name": "Tom Creighton", "mbox_sha1sum": msha},
        "result": {"score": {"scaled": .85, "raw": 85, "min": 0, "max": 100},
            "completion": True, "success": True, "response": "Well done",
            "duration": "P3Y6M4DT12H30M5S",
            "extensions": {"ext:resultKey1": "resultValue1", "ext:resultKey2": "resultValue2"}},
        "context": {"registration": context_id,
            "contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test"}},
            "language": "en-US",
            "statement": {"objectType": "StatementRef", "id": str(nested_st_id)},
            "extensions": {"ext:contextKey1": "contextVal1", "ext:contextKey2": "contextVal2"}},
        "timestamp": self.firstTime})
    put_stmt = self.client.put(path, stmt, content_type="application/json",
        Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(put_stmt.status_code, 204)

    # Retrieve and verify round-trip of every field.
    get_response = self.client.get(path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
    the_returned = json.loads(get_response.content)
    self.assertEqual(the_returned['id'], stmt_id)
    self.assertEqual(the_returned['actor']['objectType'], 'Agent')
    self.assertEqual(the_returned['actor']['name'], 'Lou Wolford')
    self.assertEqual(the_returned['actor']['account']['name'], 'louUniqueName')
    self.assertEqual(the_returned['actor']['account']['homePage'], 'http://example.com')
    self.assertEqual(the_returned['verb']['id'], 'http://adlnet.gov/expapi/verbs/helped')
    self.assertEqual(the_returned['verb']['display']['en-GB'], 'assisted')
    self.assertEqual(the_returned['verb']['display']['en-US'], 'helped')
    self.assertEqual(the_returned['result']['completion'], True)
    self.assertEqual(the_returned['result']['duration'], 'P3Y6M4DT12H30M5S')
    self.assertEqual(the_returned['result']['extensions']['ext:resultKey1'], 'resultValue1')
    self.assertEqual(the_returned['result']['extensions']['ext:resultKey2'], 'resultValue2')
    self.assertEqual(the_returned['result']['response'], 'Well done')
    self.assertEqual(the_returned['result']['score']['max'], 100)
    self.assertEqual(the_returned['result']['score']['min'], 0)
    self.assertEqual(the_returned['result']['score']['raw'], 85)
    self.assertEqual(the_returned['result']['score']['scaled'], 0.85)
    self.assertEqual(the_returned['result']['success'], True)
    self.assertEqual(the_returned['context']['contextActivities']['other'][0]['id'],
        'http://example.adlnet.gov/tincan/example/test')
    self.assertEqual(the_returned['context']['extensions']['ext:contextKey1'], 'contextVal1')
    self.assertEqual(the_returned['context']['extensions']['ext:contextKey2'], 'contextVal2')
    self.assertEqual(the_returned['context']['language'], 'en-US')
    self.assertEqual(the_returned['context']['registration'], context_id)
    self.assertEqual(the_returned['context']['statement']['id'], str(nested_st_id))
    self.assertEqual(the_returned['context']['statement']['objectType'], 'StatementRef')
    self.assertEqual(the_returned['object']['objectType'], 'Agent')
    self.assertEqual(the_returned['object']['name'], 'Tom Creighton')
    # sha1("tom@example.com") pinned as a literal so a hashing regression is caught.
    self.assertEqual(the_returned['object']['mbox_sha1sum'], 'edb97c2848fc47bdd2091028de8a3b1b24933752')

# Use this test to make sure stmts are being returned correctly with all data - doesn't check timestamps or stored fields
def test_all_fields_substatement_as_object(self):
    # PUT the statement referenced by the outer statement's context.
    nested_st_id = str(uuid.uuid1())
    nest_param = {"statementId": nested_st_id}
    nest_path = "%s?%s" % (reverse(statements), urllib.urlencode(nest_param))
    nested_stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:tincannest@adlnet.gov"},
        "verb": {"id": "http://adlnet.gov/expapi/verbs/assess",
            "display": {"en-US": "assessed", "en-GB": "graded"}},
        "object": {"id": "http://example.adlnet.gov/tincan/example/simplestatement"}})
    put_sub_stmt = self.client.put(nest_path, nested_stmt, content_type="application/json",
        Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(put_sub_stmt.status_code, 204)

    # PUT the statement referenced by the SubStatement's context.
    nested_sub_st_id = str(uuid.uuid1())
    nest_sub_param = {"statementId": nested_sub_st_id}
    nest_sub_path = "%s?%s" % (reverse(statements), urllib.urlencode(nest_sub_param))
    nested_sub_stmt = json.dumps({"actor": {"objectType": "Agent", "mbox": "mailto:tincannestsub@adlnet.gov"},
        "verb": {"id": "http://adlnet.gov/expapi/verbs/verb",
            "display": {"en-US": "verb", "en-GB": "altVerb"}},
        "object": {"id": "http://example.adlnet.gov/tincan/example/simplenestedsubstatement"}})
    put_nest_sub_stmt = self.client.put(nest_sub_path, nested_sub_stmt, content_type="application/json",
        Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(put_nest_sub_stmt.status_code, 204)

    # Main statement: the object is a fully-populated SubStatement.
    stmt_id = str(uuid.uuid1())
    context_id = str(uuid.uuid1())
    sub_context_id = str(uuid.uuid1())
    param = {"statementId": stmt_id}
    path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
    stmt = json.dumps({"actor": {"objectType": "Agent", "name": "Lou Wolford",
            "account": {"homePage": "http://example.com", "name": "louUniqueName"}},
        "verb": {"id": "http://adlnet.gov/expapi/verbs/said",
            "display": {"en-US": "said", "en-GB": "talked"}},
        "object": {"objectType": "SubStatement",
            "actor": {"objectType": "Agent", "name": "Tom Creighton", "mbox": "mailto:tom@adlnet.gov"},
            "verb": {"id": "http://adlnet.gov/expapi/verbs/assess",
                "display": {"en-US": "assessed", "en-GB": "Graded"}},
            "object": {"id": "http://example.adlnet.gov/tincan/example/simplestatement",
                'definition': {'name': {'en-US': 'SubStatement name'},
                    'description': {'en-US': 'SubStatement description'},
                    'type': 'http://adlnet.gov/expapi/activities/cmi.interaction',
                    'interactionType': 'matching',
                    'correctResponsesPattern': ['lou.3,tom.2,andy.1'],
                    'source': [{'id': 'lou', 'description': {'en-US': 'Lou', 'it': 'Luigi'}},
                        {'id': 'tom', 'description': {'en-US': 'Tom', 'it': 'Tim'}},
                        {'id': 'andy', 'description': {'en-US': 'Andy'}}],
                    'target': [{'id': '1', 'description': {'en-US': 'ADL LRS'}},
                        {'id': '2', 'description': {'en-US': 'lrs'}},
                        {'id': '3', 'description': {'en-US': 'the adl lrs', 'en-CH': 'the lrs'}}]}},
            "result": {"score": {"scaled": .50, "raw": 50, "min": 1, "max": 51},
                "completion": True, "success": True, "response": "Poorly done",
                "duration": "P3Y6M4DT12H30M5S",
                "extensions": {"ext:resultKey11": "resultValue11", "ext:resultKey22": "resultValue22"}},
            "context": {"registration": sub_context_id,
                "contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test/nest"}},
                "language": "en-US",
                "statement": {"objectType": "StatementRef", "id": str(nested_sub_st_id)},
                "extensions": {"ext:contextKey11": "contextVal11", "ext:contextKey22": "contextVal22"}}},
        "result": {"score": {"scaled": .85, "raw": 85, "min": 0, "max": 100},
            "completion": True, "success": True, "response": "Well done",
            "duration": "P3Y6M4DT12H30M5S",
            "extensions": {"ext:resultKey1": "resultValue1", "ext:resultKey2": "resultValue2"}},
        "context": {"registration": context_id,
            "contextActivities": {"other": {"id": "http://example.adlnet.gov/tincan/example/test"}},
            "language": "en-US",
            "statement": {"objectType": "StatementRef", "id": str(nested_st_id)},
            "extensions": {"ext:contextKey1": "contextVal1", "ext:contextKey2": "contextVal2"}},
        "timestamp": self.firstTime})
    put_stmt = self.client.put(path, stmt, content_type="application/json",
        Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(put_stmt.status_code, 204)

    # Retrieve and verify both the outer statement and the nested SubStatement.
    get_response = self.client.get(path, X_Experience_API_Version=settings.XAPI_VERSION, Authorization=self.auth)
    the_returned = json.loads(get_response.content)
    self.assertEqual(the_returned['id'], stmt_id)
    self.assertEqual(the_returned['actor']['objectType'], 'Agent')
    self.assertEqual(the_returned['actor']['name'], 'Lou Wolford')
    self.assertEqual(the_returned['actor']['account']['name'], 'louUniqueName')
    self.assertEqual(the_returned['actor']['account']['homePage'], 'http://example.com')
    self.assertEqual(the_returned['verb']['id'], 'http://adlnet.gov/expapi/verbs/said')
    self.assertEqual(the_returned['verb']['display']['en-GB'], 'talked')
    self.assertEqual(the_returned['verb']['display']['en-US'], 'said')
    self.assertEqual(the_returned['object']['actor']['objectType'], 'Agent')
    self.assertEqual(the_returned['object']['actor']['name'], 'Tom Creighton')
    self.assertEqual(the_returned['object']['actor']['mbox'], 'mailto:tom@adlnet.gov')
    self.assertEqual(the_returned['object']['context']['registration'], sub_context_id)
    self.assertEqual(the_returned['object']['context']['language'], 'en-US')
    self.assertEqual(the_returned['object']['context']['statement']['id'], str(nested_sub_st_id))
    self.assertEqual(the_returned['object']['context']['statement']['objectType'], 'StatementRef')
    self.assertEqual(the_returned['object']['context']['contextActivities']['other'][0]['id'],
        'http://example.adlnet.gov/tincan/example/test/nest')
    self.assertEqual(the_returned['object']['context']['extensions']['ext:contextKey11'], 'contextVal11')
    self.assertEqual(the_returned['object']['context']['extensions']['ext:contextKey22'], 'contextVal22')
    self.assertEqual(the_returned['object']['object']['id'],
        'http://example.adlnet.gov/tincan/example/simplestatement')
    self.assertEqual(the_returned['object']['object']['definition']['type'],
        'http://adlnet.gov/expapi/activities/cmi.interaction')
    self.assertEqual(the_returned['object']['object']['definition']['description']['en-US'],
        'SubStatement description')
    self.assertEqual(the_returned['object']['object']['definition']['interactionType'], 'matching')
    self.assertEqual(the_returned['object']['object']['definition']['name']['en-US'], 'SubStatement name')
    # arrays.. testing slightly differently
    source_str = json.dumps(the_returned['object']['object']['definition']['source'])
    self.assertIn('description', source_str)
    self.assertIn('id', source_str)
    self.assertIn('Lou', source_str)
    self.assertIn('Luigi', source_str)
    self.assertIn('lou', source_str)
    self.assertIn('Tom', source_str)
    self.assertIn('Tim', source_str)
    self.assertIn('tom', source_str)
    self.assertIn('Andy', source_str)
    self.assertIn('andy', source_str)
    target_str = json.dumps(the_returned['object']['object']['definition']['target'])
    self.assertIn('description', target_str)
    self.assertIn('id', target_str)
    self.assertIn('ADL LRS', target_str)
    self.assertIn('1', target_str)
    self.assertIn('lrs', target_str)
    self.assertIn('2', target_str)
    self.assertIn('the lrs', target_str)
    self.assertIn('the adl lrs', target_str)
    self.assertIn('3', target_str)
    self.assertEqual(the_returned['object']['objectType'], 'SubStatement')
    self.assertEqual(the_returned['object']['result']['completion'], True)
    self.assertEqual(the_returned['object']['result']['duration'], 'P3Y6M4DT12H30M5S')
    self.assertEqual(the_returned['object']['result']['extensions']['ext:resultKey11'], 'resultValue11')
    self.assertEqual(the_returned['object']['result']['extensions']['ext:resultKey22'], 'resultValue22')
    self.assertEqual(the_returned['object']['result']['response'], 'Poorly done')
    self.assertEqual(the_returned['object']['result']['score']['max'], 51)
    self.assertEqual(the_returned['object']['result']['score']['min'], 1)
    self.assertEqual(the_returned['object']['result']['score']['raw'], 50)
    self.assertEqual(the_returned['object']['result']['score']['scaled'], 0.5)
    self.assertEqual(the_returned['object']['result']['success'], True)
    self.assertEqual(the_returned['object']['verb']['id'], 'http://adlnet.gov/expapi/verbs/assess')
    self.assertEqual(the_returned['object']['verb']['display']['en-GB'], 'Graded')
    self.assertEqual(the_returned['object']['verb']['display']['en-US'], 'assessed')
    self.assertEqual(the_returned['result']['completion'], True)
    self.assertEqual(the_returned['result']['duration'], 'P3Y6M4DT12H30M5S')
    self.assertEqual(the_returned['result']['extensions']['ext:resultKey1'], 'resultValue1')
    self.assertEqual(the_returned['result']['extensions']['ext:resultKey2'], 'resultValue2')
    self.assertEqual(the_returned['result']['response'], 'Well done')
    self.assertEqual(the_returned['result']['score']['max'], 100)
    self.assertEqual(the_returned['result']['score']['min'], 0)
    self.assertEqual(the_returned['result']['score']['raw'], 85)
    self.assertEqual(the_returned['result']['score']['scaled'], 0.85)
    self.assertEqual(the_returned['result']['success'], True)
    self.assertEqual(the_returned['context']['contextActivities']['other'][0]['id'],
        'http://example.adlnet.gov/tincan/example/test')
    self.assertEqual(the_returned['context']['extensions']['ext:contextKey1'], 'contextVal1')
    self.assertEqual(the_returned['context']['extensions']['ext:contextKey2'], 'contextVal2')
    self.assertEqual(the_returned['context']['language'], 'en-US')
    self.assertEqual(the_returned['context']['registration'], context_id)
    self.assertEqual(the_returned['context']['statement']['id'], nested_st_id)
    self.assertEqual(the_returned['context']['statement']['objectType'], 'StatementRef')

# The fourth stmt in the list is missing its actor - the POST should fail and roll back
# (cascading delete) everything inserted for the earlier statements.
def test_post_list_rollback(self):
    cguid1 = str(uuid.uuid1())
    stmts = json.dumps([
        {"verb": {"id": "http://adlnet.gov/expapi/verbs/wrong-failed", "display": {"en-US": "wrong-failed"}},
         "object": {"id": "act:test_wrong_list_post2"},
         "actor": {"objectType": "Agent", "mbox": "mailto:wrong-t@t.com"},
         "result": {"score": {"scaled": .99}, "completion": True, "success": True, "response": "wrong",
             "extensions": {"ext:resultwrongkey1": "value1", "ext:resultwrongkey2": "value2"}}},
        {"verb": {"id": "http://adlnet.gov/expapi/verbs/wrong-kicked", "display": {"en-US": "wrong-kicked"}},
         "object": {"objectType": "Activity", "id": "act:test_wrong_list_post",
             "definition": {"name": {"en-US": "wrongactName", "en-GB": "anotherActName"},
                 "description": {"en-US": "This is my activity description.",
                     "en-GB": "This is another activity description."},
                 "type": "http://adlnet.gov/expapi/activities/http://adlnet.gov/expapi/activities/cmi.interaction",
                 "interactionType": "choice",
                 "correctResponsesPattern": ["wronggolf", "wrongtetris"],
                 "choices": [{"id": "wronggolf", "description": {"en-US": "Golf Example", "en-GB": "GOLF"}},
                     {"id": "wrongtetris", "description": {"en-US": "Tetris Example", "en-GB": "TETRIS"}},
                     {"id": "wrongfacebook", "description": {"en-US": "Facebook App", "en-GB": "FACEBOOK"}},
                     {"id": "wrongscrabble", "description": {"en-US": "Scrabble Example", "en-GB": "SCRABBLE"}}],
                 "extensions": {"ext:wrongkey1": "wrongvalue1", "ext:wrongkey2": "wrongvalue2",
                     "ext:wrongkey3": "wrongvalue3"}}},
         "actor": {"objectType": "Agent", "mbox": "mailto:wrong-t@t.com"}},
        {"verb": {"id": "http://adlnet.gov/expapi/verbs/wrong-passed", "display": {"en-US": "wrong-passed"}},
         "object": {"id": "act:test_wrong_list_post1"},
         "actor": {"objectType": "Agent", "mbox": "mailto:wrong-t@t.com"},
         "context": {"registration": cguid1,
             "contextActivities": {"other": {"id": "act:wrongActivityID2"}},
             "revision": "wrong", "platform": "wrong", "language": "en-US",
             "extensions": {"ext:wrongkey1": "wrongval1", "ext:wrongkey2": "wrongval2"}}},
        {"verb": {"id": "http://adlnet.gov/expapi/verbs/wrong-kicked", "display": {"en-US": "wrong-kicked"}},
         "object": {"id": "act:test_wrong_list_post2"}},
        {"verb": {"id": "http://adlnet.gov/expapi/verbs/wrong-kicked", "display": {"en-US": "wrong-kicked"}},
         "object": {"id": "act:test_wrong_list_post4"},
         "actor": {"objectType": "Agent", "mbox": "wrong-t@t.com"}}])
    response = self.client.post(reverse(statements), stmts, content_type="application/json",
        Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(response.status_code, 400)
    self.assertIn('actor is missing in Statement', response.content)
    # Nothing from the failed batch may remain in the database.
    verbs = Verb.objects.filter(verb_id__contains='wrong')
    activities = Activity.objects.filter(activity_id__contains='test_wrong_list_post')
    stmts = Statement.objects.all()
    # 11 statements from setup
    self.assertEqual(len(stmts), 11)
    self.assertEqual(len(verbs), 0)
    self.assertEqual(len(activities), 0)

def test_post_list_rollback_part_2(self):
    stmts = json.dumps([
        {"object": {"objectType": "Agent", "name": "john", "mbox": "mailto:john@john.com"},
         "verb": {"id": "http://adlnet.gov/expapi/verbs/wrong", "display": {"en-US": "wrong"}},
         "actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"}},
        {"verb": {"id": "http://adlnet.gov/expapi/verbs/created"},
         "object": {"objectType": "Activity", "id": "act:foogie",
             "definition": {"name": {"en-US": "testname2", "en-GB": "altname"},
                 "description": {"en-US": "testdesc2", "en-GB": "altdesc"},
                 "type": "http://adlnet.gov/expapi/activities/cmi.interaction",
                 "interactionType": "fill-in", "correctResponsesPattern": ["answer"]}},
         "actor": {"objectType": "Agent", "mbox": "mailto:wrong-t@t.com"}},
        {"verb": {"id": "http://adlnet.gov/expapi/verbs/wrong-kicked"},
         "object": {"id": "act:test_wrong_list_post2"}}])
    response = self.client.post(reverse(statements), stmts, content_type="application/json",
        Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(response.status_code, 400)
    self.assertIn('actor is missing in Statement', response.content)
    created_verbs = Verb.objects.filter(verb_id__contains='http://adlnet.gov/expapi/verbs/created')
    wrong_verbs = Verb.objects.filter(verb_id__contains='http://adlnet.gov/expapi/verbs/wrong')
    activities = Activity.objects.filter(activity_id='act:foogie')
    stmts = Statement.objects.all()
    wrong_agent = Agent.objects.filter(mbox='mailto:wrong-t@t.com')
    john_agent = Agent.objects.filter(mbox='mailto:john@john.com')
    s_agent = Agent.objects.filter(mbox='mailto:s@s.com')
    auth_agent = Agent.objects.filter(mbox='mailto:test1@tester.com')
    self.assertEqual(len(created_verbs), 1)
    # Both verbs from the first and last stmts in the list would still be there
    self.assertEqual(len(wrong_verbs), 0)
    self.assertEqual(len(activities), 1)
    self.assertEqual(len(stmts), 11)
    self.assertEqual(len(wrong_agent), 0)
    self.assertEqual(len(john_agent), 1)
    self.assertEqual(len(s_agent), 1)
    self.assertEqual(len(auth_agent), 0)

def test_post_list_rollback_with_void(self):
    stmts = json.dumps([
        {"actor": {"objectType": "Agent", "mbox": "mailto:only-s@s.com"},
         "object": {"objectType": "StatementRef", "id": str(self.exist_stmt_id)},
         "verb": {"id": "http://adlnet.gov/expapi/verbs/voided", "display": {"en-US": "voided"}}},
        {"verb": {"id": "http://adlnet.gov/expapi/verbs/wrong-kicked"},
         "object": {"id": "act:test_wrong_list_post2"}}])
    response = self.client.post(reverse(statements), stmts, content_type="application/json",
        Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(response.status_code, 400)
    self.assertIn('actor is missing in Statement', response.content)
    # The void must have been rolled back along with everything else.
    voided_st = Statement.objects.get(statement_id=str(self.exist_stmt_id))
    voided_verb = Verb.objects.filter(verb_id__contains='voided')
    only_actor = Agent.objects.filter(mbox="mailto:only-s@s.com")
    stmts = Statement.objects.all()
    self.assertEqual(len(stmts), 11)
    self.assertEqual(voided_st.voided, False)
    self.assertEqual(len(voided_verb), 0)
    self.assertEqual(len(only_actor), 0)

def test_post_list_rollback_with_subs(self):
    sub_context_id = str(uuid.uuid1())
    stmts = json.dumps([
        {"actor": {"objectType": "Agent", "mbox": "mailto:wrong-s@s.com"},
         "verb": {"id": "http://adlnet.gov/expapi/verbs/wrong", "display": {"en-US": "wrong"}},
         "object": {"objectType": "Agent", "name": "john", "mbox": "mailto:john@john.com"}},
        {"actor": {"objectType": "Agent", "mbox": "mailto:s@s.com"},
         "verb": {"id": "http://adlnet.gov/expapi/verbs/wrong-next", "display": {"en-US": "wrong-next"}},
         "object": {"objectType": "SubStatement",
             "actor": {"objectType": "Agent", "mbox": "mailto:wrong-ss@ss.com"},
             "verb": {"id": "http://adlnet.gov/expapi/verbs/wrong-sub"},
             "object": {"objectType": "Activity", "id": "act:wrong-testex.com"},
             "result": {"completion": True, "success": True, "response": "sub-wrong-kicked"},
             "context": {"registration": sub_context_id,
                 "contextActivities": {"other": {"id": "act:sub-wrong-ActivityID"}},
                 "revision": "foo", "platform": "bar", "language": "en-US",
                 "extensions": {"ext:wrong-k1": "v1", "ext:wrong-k2": "v2"}}}},
        {"verb": {"id": "http://adlnet.gov/expapi/verbs/wrong-kicked"},
         "object": {"id": "act:test_wrong_list_post2"}}])
    response = self.client.post(reverse(statements), stmts, content_type="application/json",
        Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
    self.assertEqual(response.status_code, 400)
    self.assertIn('actor is missing in Statement', response.content)
    # The SubStatement and all of its pieces must have been rolled back too.
    s_agent = Agent.objects.filter(mbox="mailto:wrong-s@s.com")
    ss_agent = Agent.objects.filter(mbox="mailto:wrong-ss@ss.com")
    john_agent = Agent.objects.filter(mbox="mailto:john@john.com")
    subs = SubStatement.objects.all()
    wrong_verb = Verb.objects.filter(verb_id__contains="wrong")
    activities = Activity.objects.filter(activity_id__contains="wrong")
    stmts = Statement.objects.all()
    self.assertEqual(len(stmts), 11)
    self.assertEqual(len(s_agent), 0)
    self.assertEqual(len(ss_agent), 0)
    self.assertEqual(len(john_agent), 1)
    # Only 1 sub from setup
    self.assertEqual(len(subs), 1)
    self.assertEqual(len(wrong_verb), 0)
    self.assertEqual(len(activities), 0)

# NOTE(review): this test continues past the end of this chunk; only its head is visible here.
def test_activity_definition_change(self):
    # Register two distinct users whose credentials are used later in the test.
    username_1 = "tester1"
    email_1 = "test1@tester.com"
    password_1 = "test"
    auth_1 = "Basic %s" % base64.b64encode("%s:%s" % (username_1, password_1))
    form_1 = {"username": username_1, "email": email_1, "password": password_1, "password2": password_1}
    response_1 = self.client.post(reverse(register), form_1, X_Experience_API_Version=settings.XAPI_VERSION)
    username_2 = "tester2"
    email_2 = "test2@tester.com"
    password_2 = "test2"
    auth_2 = "Basic %s" % base64.b64encode("%s:%s" % (username_2, password_2))
    form_2
= {"username":username_2, "email":email_2,"password":password_2,"password2":password_2} response_2 = self.client.post(reverse(register),form_2, X_Experience_API_Version=settings.XAPI_VERSION) # Should have no definition stmt_1 = json.dumps({"actor": {"objectType":"Agent","name":"max","mbox":"mailto:max@max.com"}, "object":{"id": "act:test_activity_change"}, "verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}}}) response_1 = self.client.post(reverse(statements), stmt_1, content_type="application/json", Authorization=auth_1, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response_1.status_code, 200) user1_agent = Agent.objects.get(mbox="mailto:test1@tester.com") act = Activity.objects.get(activity_id="act:test_activity_change").to_dict() self.assertEqual(act["id"], "act:test_activity_change") with self.assertRaises(KeyError): act["definition"] acts = Activity.objects.filter(activity_id="act:test_activity_change").count() self.assertEqual(acts, 1) # Creates local act for other user stmt_2 = json.dumps({"actor": {"objectType":"Agent","name":"max","mbox":"mailto:max@max.com"}, "object":{"id": "act:test_activity_change", "definition":{"name":{"en-US": "fail_test"}}}, "verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}}}) response_2 = self.client.post(reverse(statements), stmt_2, content_type="application/json", Authorization=auth_2, X_Experience_API_Version=settings.XAPI_VERSION) user2_agent = Agent.objects.get(mbox="mailto:test2@tester.com") self.assertEqual(response_2.status_code, 200) act = Activity.objects.get(activity_id="act:test_activity_change", authority=user2_agent).to_dict() self.assertEqual(act["id"], "act:test_activity_change") self.assertIn('definition', act) acts = Activity.objects.filter(activity_id="act:test_activity_change").count() self.assertEqual(acts, 2) # Should update local version of activity with definition for that user response_3 = 
self.client.post(reverse(statements), stmt_1, content_type="application/json", Authorization=auth_2, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response_3.status_code, 200) act = Activity.objects.get(activity_id="act:test_activity_change", authority=user2_agent).to_dict() self.assertEqual(act["id"], "act:test_activity_change") self.assertIn('definition', act) acts = Activity.objects.filter(activity_id="act:test_activity_change").count() self.assertEqual(acts, 2) # Should have new definition for canonical since user is owner stmt_3 = json.dumps({"actor": {"objectType":"Agent","name":"max","mbox":"mailto:max@max.com"}, "object":{"id": "act:test_activity_change", "definition":{"name":{"en-US": "foo"}}}, "verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}}}) response_4 = self.client.post(reverse(statements), stmt_3, content_type="application/json", Authorization=auth_1, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response_4.status_code, 200) act = Activity.objects.get(activity_id="act:test_activity_change", authority=user1_agent).to_dict() self.assertEqual(act["id"], "act:test_activity_change") self.assertEqual(act["definition"], {"name":{"en-US": "foo"}}) # Should have updated local activity for that user with new definition response_5 = self.client.post(reverse(statements), stmt_3, content_type="application/json", Authorization=auth_2, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response_5.status_code, 200) act = Activity.objects.get(activity_id="act:test_activity_change", authority=user2_agent).to_dict() self.assertEqual(act["id"], "act:test_activity_change") self.assertEqual(act["definition"], {"name":{"en-US": "foo"}}) acts = Activity.objects.filter(activity_id="act:test_activity_change").count() self.assertEqual(acts, 2) # Should update local version of that activity for that user stmt_4 = json.dumps({"actor": 
{"objectType":"Agent","name":"max","mbox":"mailto:max@max.com"}, "object":{"id": "act:test_activity_change", "definition":{"name":{"en-US": "bar"}}}, "verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}}}) response_6 = self.client.post(reverse(statements), stmt_4, content_type="application/json", Authorization=auth_2, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response_6.status_code, 200) act = Activity.objects.get(activity_id="act:test_activity_change", authority=user2_agent).to_dict() self.assertEqual(act["id"], "act:test_activity_change") self.assertEqual(act["definition"], {"name":{"en-US": "bar"}}) acts = Activity.objects.filter(activity_id="act:test_activity_change").count() self.assertEqual(acts, 2) # Should have replaced name in def for local act of that user stmt_5 = json.dumps({"actor": {"objectType":"Agent","name":"max","mbox":"mailto:max@max.com"}, "object":{"id": "act:test_activity_change", "definition":{"name":{"fr": "bar"}}}, "verb":{"id": "http://adlnet.gov/expapi/verbs/created", "display": {"en-US":"created"}}}) response_7 = self.client.post(reverse(statements), stmt_5, content_type="application/json", Authorization=auth_2, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response_7.status_code, 200) act = Activity.objects.get(activity_id="act:test_activity_change", authority=user2_agent).to_dict() self.assertEqual(act["id"], "act:test_activity_change") self.assertIn("fr", act['definition']['name']) acts = Activity.objects.filter(activity_id="act:test_activity_change").count() self.assertEqual(acts, 2) # Can't remove definition if it already exists - should still be there response_8 = self.client.post(reverse(statements), stmt_1, content_type="application/json", Authorization=auth_2, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response_8.status_code, 200) act = Activity.objects.get(activity_id="act:test_activity_change", authority=user2_agent).to_dict() 
self.assertEqual(act["id"], "act:test_activity_change") self.assertIn("definition", act.keys()) acts = Activity.objects.filter(activity_id="act:test_activity_change").count() self.assertEqual(acts, 2) # Check canonical of last stmt returned from query to make sure it contains the definition param = {"agent":{"mbox":"mailto:max@max.com"}, "format":"canonical", "activity":"act:test_activity_change"} path = "%s?%s" % (reverse(statements),urllib.urlencode(param)) r = self.client.get(path, X_Experience_API_Version="1.0", Authorization=auth_1) self.assertEqual(r.status_code, 200) first_stmt = json.loads(r.content)["statements"][0] self.assertEqual(first_stmt["object"]["definition"], {"name":{"en-US": "foo"}}) def test_post_with_non_oauth_not_existing_group(self): ot = "Group" name = "the group ST" mbox = "mailto:the.groupST@example.com" stmt = json.dumps({"actor":{"name":"agentA","mbox":"mailto:agentA@example.com"},"verb":{"id": "http://verb/iri/joined", "display":{"en-US":"joined"}}, "object": {"id":"act:i.pity.the.fool"}, "authority": {"objectType":ot, "name":name, "mbox":mbox,"member":[{"name":"agentA","mbox":"mailto:agentA@example.com"},{"name":"agentB","mbox":"mailto:agentB@example.com"}]}}) response = self.client.post(reverse(statements), stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response.status_code, 400) self.assertIn("Statements cannot have a non-OAuth group as the authority", response.content) def test_post_with_non_oauth_existing_group(self): ot = "Group" name = "the group ST" mbox = "mailto:the.groupST@example.com" group = {"objectType":ot, "name":name, "mbox":mbox,"member":[{"name":"agentA","mbox":"mailto:agentA@example.com"},{"name":"agentB","mbox":"mailto:agentB@example.com"}]} Agent.objects.retrieve_or_create(**group) stmt = json.dumps({"actor":{"name":"agentA","mbox":"mailto:agentA@example.com"},"verb":{"id": "http://verb/iri/joined", "display":{"en-US":"joined"}}, 
"object": {"id":"act:i.pity.the.fool"}, "authority": {"objectType":ot, "name":name, "mbox":mbox,"member":[{"name":"agentA","mbox":"mailto:agentA@example.com"},{"name":"agentB","mbox":"mailto:agentB@example.com"}]}}) response = self.client.post(reverse(statements), stmt, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION) self.assertEqual(response.status_code, 400) self.assertEqual(response.content, "Statements cannot have a non-OAuth group as the authority")
66.603556
240
0.64054
8,735
74,929
5.362679
0.06743
0.07365
0.041885
0.0605
0.827765
0.794633
0.75356
0.717354
0.672032
0.640288
0
0.018852
0.171723
74,929
1,125
241
66.603556
0.735925
0.01771
0
0.402373
0
0.003236
0.316495
0.036178
0
0
0
0
0.317152
0
null
null
0.020496
0.014024
null
null
0.001079
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
6
54def8381a9d158f25ce1700c0f632a59c7ef8b5
2,135
py
Python
ibm_i/datadog_checks/ibm_i/config_models/defaults.py
OuesFa/integrations-core
0ffe4ca306580a2e775b515152384034c2dfdc03
[ "BSD-3-Clause" ]
null
null
null
ibm_i/datadog_checks/ibm_i/config_models/defaults.py
OuesFa/integrations-core
0ffe4ca306580a2e775b515152384034c2dfdc03
[ "BSD-3-Clause" ]
null
null
null
ibm_i/datadog_checks/ibm_i/config_models/defaults.py
OuesFa/integrations-core
0ffe4ca306580a2e775b515152384034c2dfdc03
[ "BSD-3-Clause" ]
null
null
null
# (C) Datadog, Inc. 2021-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) # This file is autogenerated. # To change this file you should edit assets/configuration/spec.yaml and then run the following commands: # ddev -x validate config -s <INTEGRATION_NAME> # ddev -x validate models -s <INTEGRATION_NAME> from datadog_checks.base.utils.models.fields import get_default_field_value def shared_service(field, value): return get_default_field_value(field, value) def instance_connection_string(field, value): return get_default_field_value(field, value) def instance_disable_generic_tags(field, value): return False def instance_driver(field, value): return 'iSeries Access ODBC Driver' def instance_empty_default_hostname(field, value): return False def instance_hostname(field, value): return get_default_field_value(field, value) def instance_job_query_timeout(field, value): return 240 def instance_metric_patterns(field, value): return get_default_field_value(field, value) def instance_min_collection_interval(field, value): return 15 def instance_password(field, value): return get_default_field_value(field, value) def instance_queries(field, value): return [ {'name': 'disk_usage'}, {'name': 'cpu_usage'}, {'name': 'jobq_job_status'}, {'name': 'active_job_status'}, {'name': 'job_memory_usage'}, {'name': 'memory_info'}, {'name': 'subsystem'}, {'name': 'job_queue'}, {'name': 'message_queue_info'}, ] def instance_query_timeout(field, value): return 30 def instance_service(field, value): return get_default_field_value(field, value) def instance_severity_threshold(field, value): return 50 def instance_system(field, value): return get_default_field_value(field, value) def instance_system_mq_query_timeout(field, value): return 80 def instance_tags(field, value): return get_default_field_value(field, value) def instance_username(field, value): return get_default_field_value(field, value)
22.956989
105
0.728806
283
2,135
5.222615
0.360424
0.250338
0.194858
0.135318
0.449256
0.392422
0.34912
0.34912
0.34912
0.317997
0
0.009075
0.174239
2,135
92
106
23.206522
0.829268
0.159251
0
0.234043
1
0
0.098489
0
0
0
0
0
0
1
0.382979
false
0.021277
0.021277
0.382979
0.787234
0
0
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
0733029afbb2c22e1af63d6a9fd161526d4a5f0a
83
py
Python
dynamicadmin/tests/tests_bundle_app/models.py
seht/django-dynamic-admin
5b476da2875ef182339a07ae603bbcf5fa1d9adc
[ "BSD-3-Clause" ]
1
2019-10-17T11:53:22.000Z
2019-10-17T11:53:22.000Z
dynamicadmin/tests/tests_bundle_app/models.py
seht/django-dynamic-admin
5b476da2875ef182339a07ae603bbcf5fa1d9adc
[ "BSD-3-Clause" ]
null
null
null
dynamicadmin/tests/tests_bundle_app/models.py
seht/django-dynamic-admin
5b476da2875ef182339a07ae603bbcf5fa1d9adc
[ "BSD-3-Clause" ]
null
null
null
from dynamicadmin.models.bundle import Bundle class TestBundle(Bundle): pass
13.833333
45
0.783133
10
83
6.5
0.8
0
0
0
0
0
0
0
0
0
0
0
0.156627
83
5
46
16.6
0.928571
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
075f6e770b752a04ce6d6a91056879d660757e4e
184
py
Python
tests/test_showcallstack.py
asweigart/showcallstack
3a58769a292cc96fcf3f9bf121022be027d11d7f
[ "BSD-3-Clause" ]
5
2020-10-29T07:18:27.000Z
2020-10-30T11:45:04.000Z
tests/test_showcallstack.py
asweigart/showcallstack
3a58769a292cc96fcf3f9bf121022be027d11d7f
[ "BSD-3-Clause" ]
null
null
null
tests/test_showcallstack.py
asweigart/showcallstack
3a58769a292cc96fcf3f9bf121022be027d11d7f
[ "BSD-3-Clause" ]
null
null
null
from __future__ import division, print_function import pytest import showcallstack def test_basic(): pass # TODO - add unit tests if __name__ == "__main__": pytest.main()
15.333333
47
0.728261
23
184
5.217391
0.826087
0
0
0
0
0
0
0
0
0
0
0
0.195652
184
11
48
16.727273
0.810811
0.11413
0
0
0
0
0.049689
0
0
0
0
0.090909
0
1
0.142857
true
0.142857
0.428571
0
0.571429
0.142857
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
1
1
1
0
1
0
0
6
075fb0e62849f4b3bfb31c7f7cb8434191f679aa
62
py
Python
ir_attachment_s3/models/__init__.py
agenterpgmbh/misc-addons
27e36d119b1e73089a2ebfcd8d4cfc706c8f1f41
[ "MIT" ]
null
null
null
ir_attachment_s3/models/__init__.py
agenterpgmbh/misc-addons
27e36d119b1e73089a2ebfcd8d4cfc706c8f1f41
[ "MIT" ]
1
2020-05-03T04:27:29.000Z
2020-05-03T04:27:29.000Z
ir_attachment_s3/models/__init__.py
eneldoserrata/misc-addons
6f3b94d8a71d603d9ad449f96edfc66385e78080
[ "MIT" ]
2
2020-05-09T02:08:59.000Z
2022-03-21T06:37:15.000Z
from . import ir_attachment from . import res_config_settings
20.666667
33
0.83871
9
62
5.444444
0.777778
0.408163
0
0
0
0
0
0
0
0
0
0
0.129032
62
2
34
31
0.907407
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
4afa7c5c57a25fb5165b8dab1cc8ebf97c4247ea
93
py
Python
build/lib/btreceptor/__init__.py
michael-swift/btreceptor
78ffd7ae59ac87659ffc54bc14da945975ac90bf
[ "Apache-2.0" ]
null
null
null
build/lib/btreceptor/__init__.py
michael-swift/btreceptor
78ffd7ae59ac87659ffc54bc14da945975ac90bf
[ "Apache-2.0" ]
null
null
null
build/lib/btreceptor/__init__.py
michael-swift/btreceptor
78ffd7ae59ac87659ffc54bc14da945975ac90bf
[ "Apache-2.0" ]
1
2020-07-19T20:25:40.000Z
2020-07-19T20:25:40.000Z
from . import parsing from . import sequences from . import clustering from . import drawing
18.6
24
0.784946
12
93
6.083333
0.5
0.547945
0
0
0
0
0
0
0
0
0
0
0.172043
93
4
25
23.25
0.948052
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
ab469fc37f717f6194ba2eb99b66612205a534ff
20
py
Python
djangular/config/angularapp_template/tests/__init__.py
jianglb-alibaba/djangular-0.2.7
d1e2d188cf4ab8ae757bd9bc3069ffef8f0fc753
[ "Apache-2.0" ]
null
null
null
djangular/config/angularapp_template/tests/__init__.py
jianglb-alibaba/djangular-0.2.7
d1e2d188cf4ab8ae757bd9bc3069ffef8f0fc753
[ "Apache-2.0" ]
3
2020-02-12T00:28:52.000Z
2021-06-10T20:05:09.000Z
djangular/config/angularapp_template/tests/__init__.py
jianglb-alibaba/djangular-0.2.7
d1e2d188cf4ab8ae757bd9bc3069ffef8f0fc753
[ "Apache-2.0" ]
null
null
null
from simple import *
20
20
0.8
3
20
5.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.15
20
1
20
20
0.941176
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
ab63d8d14626fea92597071e77badf0c5802c25f
3,743
py
Python
dstools/ml/cross_val_custom.py
DorimenUkraine/ds-tools
34454a6054b9a81358f392f996b41f38895b15fd
[ "Apache-2.0" ]
null
null
null
dstools/ml/cross_val_custom.py
DorimenUkraine/ds-tools
34454a6054b9a81358f392f996b41f38895b15fd
[ "Apache-2.0" ]
null
null
null
dstools/ml/cross_val_custom.py
DorimenUkraine/ds-tools
34454a6054b9a81358f392f996b41f38895b15fd
[ "Apache-2.0" ]
null
null
null
import numpy as np import pandas as pd from sklearn.model_selection import KFold,StratifiedKFold def cross_validation_score_statement(estimator,X,y,scoring,n_splits=5,statement=None,random_state=0): """ Evaluate a score by cross-validation. The fit method will be performed on the entire train subset at each iteration, the predict method and scoring will be performed only for objects from test subset where statement is True Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : pandas.DataFrame The data to fit. y : pandas.Series The target variable to try to predict. scoring : callable The scoring function of signature scoring(y_true,y_pred). statement : boolean numpy.array of shape equal to y.shape The mask showing the objects we want to evaluate estimator on. n_splits : int Number of folds for cross-validation random_state : int Random_state for KFold and StratifiedKFold Returns ----------- scores : array of float, shape=(n_splits,) """ if statement is None: cv = KFold(n_splits=n_splits,shuffle=True,random_state=random_state) cv_iter = list(cv.split(X, y)) else: cv = StratifiedKFold(n_splits=n_splits,shuffle=True,random_state=random_state) cv_iter = list(cv.split(X, statement)) scores = [] for train, test in cv_iter: estimator.fit(X.iloc[train,:].values,y.iloc[train].values) if statement is not None: y_statement = y.iloc[test].loc[statement[test]] pred_statement = estimator.predict(X.iloc[test,:].loc[statement[test]].values) else: y_statement = y.iloc[test] pred_statement = estimator.predict(X.iloc[test,:].values) scores.append(scoring(y_statement,pred_statement)) return np.array(scores) def cross_validation_score_fit_subset(estimator,X,y,scoring,n_splits=5,statement=None,random_state=0): """ Evaluate a score by cross-validation. The fit method will be performed on the subset of train subset at each iteration where statement is True. 
The predict method and scoring will be performed on the entire test subset. Parameters ---------- estimator : estimator object implementing 'fit' and 'predict' The object to use to fit the data. X : pandas.DataFrame The data to fit. y : pandas.Series The target variable to try to predict. scoring : callable The scoring function of signature scoring(y_true,y_pred). statement : boolean numpy.array of shape equal to y.shape The mask showing the objects we want to fit estimator on. n_splits : int Number of folds for cross-validation random_state : int Random_state for KFold and StratifiedKFold Returns ----------- scores : array of float, shape=(n_splits,) """ if statement is None: cv = KFold(n_splits=n_splits,shuffle=True,random_state=random_state) cv_iter = list(cv.split(X, y)) else: cv = StratifiedKFold(n_splits=n_splits,shuffle=True,random_state=random_state) cv_iter = list(cv.split(X, statement)) scores = [] for train, test in cv_iter: if statement is not None: estimator.fit(X.iloc[train,:].values[statement[train]],y.iloc[train].values[statement[train]]) else: estimator.fit(X.iloc[train,:].values,y.iloc[train].values) y_ = y.iloc[test] pred_ = estimator.predict(X.iloc[test,:].values) scores.append(scoring(y_,pred_)) return np.array(scores)
39.4
110
0.66444
519
3,743
4.684008
0.186898
0.040313
0.037022
0.023036
0.861785
0.780749
0.762238
0.762238
0.704237
0.704237
0
0.001412
0.243388
3,743
94
111
39.819149
0.856992
0.450975
0
0.631579
0
0
0
0
0
0
0
0
0
1
0.052632
false
0
0.078947
0
0.184211
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
db5b5684b46930ad9be0aba21be7e472122b3c22
75
py
Python
bot/seasons/evergreen/__init__.py
martmists/seasonalbot
d81f6ba86eecea72bf8a980995333af3046e9680
[ "MIT" ]
null
null
null
bot/seasons/evergreen/__init__.py
martmists/seasonalbot
d81f6ba86eecea72bf8a980995333af3046e9680
[ "MIT" ]
null
null
null
bot/seasons/evergreen/__init__.py
martmists/seasonalbot
d81f6ba86eecea72bf8a980995333af3046e9680
[ "MIT" ]
null
null
null
from bot.seasons import SeasonBase class Evergreen(SeasonBase): pass
12.5
34
0.773333
9
75
6.444444
0.888889
0
0
0
0
0
0
0
0
0
0
0
0.173333
75
5
35
15
0.935484
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
db766e7956dff111b2ff98a8e63492b82edf882b
49
py
Python
newsapi/__init__.py
amitshankar97/real-news
fb8272b24475ef67534f3943649e4517f3a09dd6
[ "0BSD" ]
2
2020-05-20T23:10:31.000Z
2020-12-09T13:00:06.000Z
newsapi/__init__.py
amitshankar97/real-news
fb8272b24475ef67534f3943649e4517f3a09dd6
[ "0BSD" ]
3
2020-04-27T18:13:30.000Z
2020-04-27T18:16:00.000Z
flask/lib/python3.6/site-packages/newsapi/__init__.py
JOFLIX/grapevines
34576e01184570d79cc140b42ffb71d322132da6
[ "MIT", "Unlicense" ]
null
null
null
from newsapi.newsapi_client import NewsApiClient
24.5
48
0.897959
6
49
7.166667
0.833333
0
0
0
0
0
0
0
0
0
0
0
0.081633
49
1
49
49
0.955556
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
db84862778b4403d602cc868e2cc13030df7ef74
1,503
py
Python
rvpvp/isa/rvv/vxxx_vxm.py
ultrafive/riscv-pvp
843e38422c3d545352b955764927d5e7847e5453
[ "Unlicense" ]
5
2021-05-10T09:57:00.000Z
2021-10-05T14:39:20.000Z
rvpvp/isa/rvv/vxxx_vxm.py
ultrafive/riscv-pvp
843e38422c3d545352b955764927d5e7847e5453
[ "Unlicense" ]
null
null
null
rvpvp/isa/rvv/vxxx_vxm.py
ultrafive/riscv-pvp
843e38422c3d545352b955764927d5e7847e5453
[ "Unlicense" ]
1
2021-05-14T20:24:11.000Z
2021-05-14T20:24:11.000Z
from ...isa.inst import * import numpy as np class Vadc_vxm(Inst): name = 'vadc.vxm' # vadc.vxm vd, vs2, rs1, v0 def golden(self): if self['vl']==0: return self['ori'] result = self['ori'].copy() vstart = self['vstart'] if 'vstart' in self else 0 mask = np.unpackbits(self['mask'], bitorder='little')[0: self['vl']] for ii in range(vstart, self['vl']): result[ii] = self['vs2'][ii].astype(object) + self['rs1'] + mask[ii] return result class Vsbc_vxm(Inst): name = 'vsbc.vxm' # vsbc.vxm vd, vs2, rs1, v0 def golden(self): if self['vl']==0: return self['ori'] result = self['ori'].copy() vstart = self['vstart'] if 'vstart' in self else 0 mask = np.unpackbits(self['mask'], bitorder='little')[0: self['vl']] for ii in range(vstart, self['vl']): result[ii] = self['vs2'][ii].astype(object) - self['rs1'] - mask[ii] return result class Vmerge_vxm(Inst): name = 'vmerge.vxm' # vmerge.vxm vd, vs2, rs1, v0 def golden(self): if self['vl']==0: return self['ori'] result = self['ori'].copy() vstart = self['vstart'] if 'vstart' in self else 0 mask = np.unpackbits(self['mask'], bitorder='little')[0: self['vl']] for ii in range(vstart, self['vl']): result[ii] = self['rs1'] if mask[ii] else self['vs2'][ii] return result
33.4
80
0.530273
206
1,503
3.854369
0.18932
0.06801
0.041562
0.041562
0.802267
0.802267
0.802267
0.802267
0.802267
0.802267
0
0.02262
0.294079
1,503
44
81
34.159091
0.72573
0.055223
0
0.685714
0
0
0.103399
0
0
0
0
0
0
1
0.085714
false
0
0.057143
0
0.485714
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
dbc0390b0b420b1cf6059101342c34caed99808f
35
py
Python
toolcache/cachetypes/base_cache/__init__.py
sslivkoff/toolcache
62fb3441adb03fdee4fdbca14605f0ecec2ad44c
[ "Apache-2.0" ]
null
null
null
toolcache/cachetypes/base_cache/__init__.py
sslivkoff/toolcache
62fb3441adb03fdee4fdbca14605f0ecec2ad44c
[ "Apache-2.0" ]
null
null
null
toolcache/cachetypes/base_cache/__init__.py
sslivkoff/toolcache
62fb3441adb03fdee4fdbca14605f0ecec2ad44c
[ "Apache-2.0" ]
null
null
null
from .base_cache import BaseCache
11.666667
33
0.828571
5
35
5.6
1
0
0
0
0
0
0
0
0
0
0
0
0.142857
35
2
34
17.5
0.933333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
dbd4a186d47812480f69f1a275ca89ffaf637086
89
py
Python
python-analysers/src/test/resources/org/jetbrains/research/lupa/pythonAnalysis/imports/analysis/psi/fromImportStatementsData/in_2_several_star_imports.py
JetBrains-Research/Lupa
c105487621564c60cae17395bf32eb40868ceb89
[ "Apache-2.0" ]
16
2022-01-11T00:32:20.000Z
2022-03-25T21:40:52.000Z
python-analysers/src/test/resources/org/jetbrains/research/lupa/pythonAnalysis/imports/analysis/psi/fromImportStatementsData/in_2_several_star_imports.py
nbirillo/Kotlin-Analysis
73c3b8a59bf40ed932bb512f30b0ff31f251af40
[ "Apache-2.0" ]
12
2021-07-05T11:42:01.000Z
2021-12-23T07:57:54.000Z
python-analysers/src/test/resources/org/jetbrains/research/lupa/pythonAnalysis/imports/analysis/psi/fromImportStatementsData/in_2_several_star_imports.py
nbirillo/Kotlin-Analysis
73c3b8a59bf40ed932bb512f30b0ff31f251af40
[ "Apache-2.0" ]
3
2021-09-10T13:21:54.000Z
2021-11-23T11:37:55.000Z
from math import * from cmath import * from plotly.express import * from pandas import *
17.8
28
0.764045
13
89
5.230769
0.538462
0.441176
0
0
0
0
0
0
0
0
0
0
0.179775
89
4
29
22.25
0.931507
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
91563cce06082f01305869c8321f2eb5894825fe
20
py
Python
gdeck/__init__.py
damiiegregorio/gdeck
fca2857b88ea7fbc41d148ad7efee2456c5e4e21
[ "MIT" ]
null
null
null
gdeck/__init__.py
damiiegregorio/gdeck
fca2857b88ea7fbc41d148ad7efee2456c5e4e21
[ "MIT" ]
null
null
null
gdeck/__init__.py
damiiegregorio/gdeck
fca2857b88ea7fbc41d148ad7efee2456c5e4e21
[ "MIT" ]
null
null
null
from .gdeck import *
20
20
0.75
3
20
5
1
0
0
0
0
0
0
0
0
0
0
0
0.15
20
1
20
20
0.882353
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
916d0b9e00472ebd1f20673c9679d2260e4b26a2
9,533
py
Python
galileo/tests/tf/test_neighbor.py
YaoPu2021/galileo
0ebee2052bf78205f93f8cbbe0e2884095dd7af7
[ "Apache-2.0" ]
115
2021-09-09T03:01:58.000Z
2022-03-30T10:46:26.000Z
galileo/tests/tf/test_neighbor.py
Hacky-DH/galileo
e4d5021f0287dc879730dfa287b9a056f152f712
[ "Apache-2.0" ]
1
2021-12-09T07:34:41.000Z
2021-12-20T06:24:27.000Z
galileo/tests/tf/test_neighbor.py
Hacky-DH/galileo
e4d5021f0287dc879730dfa287b9a056f152f712
[ "Apache-2.0" ]
28
2021-09-10T08:47:20.000Z
2022-03-17T07:29:26.000Z
# Copyright 2020 JD.com, Inc. Galileo Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import os import pytest import tensorflow as tf from galileo.tests import expected_data from galileo.tests.utils import numpy_equal_unique, numpy_equal from galileo.tf import ops valid_ids = ('global', 'valid_zero', 'valid_one', 'valid_multi') valid_topk_ids = ('global', 'valid_one', 'valid_multi') invalid_ids = ('no_neighbor', 'invalid_types') valid_params = ( ([1001], [], [1000, 1003, 1004, 1005], [3.5, 5.5]), ([1001], [0], [1003, 1004, 1005], [3.5]), ([1001], [1], [1000], [5.5]), ([1000, 1002, 1006, 1009], [0, 1], [1001, 1001, 1000, 1000], [3.5, 5.5]), ) valid_topk_params = ( ([1001], [], [1000], [5.5]), ([1001], [1], [1000], [5.5]), ([1000, 1002, 1006, 1009], [0, 1], [1001, 1001, 1000, 1000], [3.5, 3.5, 5.5, 5.5]), ) invalid_params = ( pytest.param([1003, 1004, 1005], []), pytest.param([1001], [-1, 5]), ) @pytest.mark.parametrize('vertex,types,expected_nbr,expected_weight', valid_params, ids=valid_ids) def test_valid_sample_neighbor_with_weight(prepare_tf_env, vertex, types, expected_nbr, expected_weight): count = 100 is_weight = True neighbors, weights = ops.sample_neighbors(vertex, types, count, is_weight) assert numpy_equal([len(vertex), count], tf.shape(neighbors).numpy()) assert numpy_equal([len(vertex), count], tf.shape(weights).numpy()) assert numpy_equal_unique(expected_nbr, 
neighbors.numpy()) assert numpy_equal_unique(expected_weight, weights.numpy()) @pytest.mark.parametrize('vertex,types', invalid_params, ids=invalid_ids) def test_invalid_sample_neighbor_with_weight(prepare_tf_env, vertex, types): count = 100 is_weight = True with pytest.raises(Exception): ops.sample_neighbors(vertex, types, count, is_weight) @pytest.mark.parametrize('vertex,types,expected_nbr,expected_weight', valid_params, ids=valid_ids) def test_valid_sample_neighbor_without_weight(prepare_tf_env, vertex, types, expected_nbr, expected_weight): vertex_tensor = tf.constant(vertex, dtype=tf.int64) types_tensor = tf.constant(types, dtype=tf.uint8) count = 100 is_weight = False neighbors = ops.sample_neighbors(vertex_tensor, types_tensor, count, is_weight)[0] assert numpy_equal([len(vertex), count], tf.shape(neighbors).numpy()) assert numpy_equal_unique(expected_nbr, neighbors.numpy()) @pytest.mark.parametrize('vertex,types', invalid_params, ids=invalid_ids) def test_invalid_sample_neighbor_without_weight(prepare_tf_env, vertex, types): vertex_tensor = tf.constant(vertex, dtype=tf.int64) types_tensor = tf.constant(types, dtype=tf.uint8) count = 100 is_weight = False with pytest.raises(Exception): ops.sample_neighbors(vertex_tensor, types_tensor, count, is_weight)[0] @pytest.mark.parametrize('vertex,types,expected_nbr,expected_weight', valid_topk_params, ids=valid_topk_ids) def test_valid_topk_neighbor_with_weight(prepare_tf_env, vertex, types, expected_nbr, expected_weight): count = 1 is_weight = True neighbors = ops.get_topk_neighbors(vertex, types, count, is_weight) assert numpy_equal([len(vertex), 1], tf.shape(neighbors[0]).numpy()) assert numpy_equal([len(vertex), 1], tf.shape(neighbors[1]).numpy()) assert numpy_equal(expected_nbr, tf.reshape(neighbors[0], [-1]).numpy()) assert numpy_equal(expected_weight, tf.reshape(neighbors[1], [-1]).numpy()) @pytest.mark.parametrize('vertex,types', invalid_params, ids=invalid_ids) def 
test_invalid_topk_neighbor_with_weight(prepare_tf_env, vertex, types): count = 2 is_weight = True with pytest.raises(Exception): ops.get_topk_neighbors(vertex, types, count, is_weight) @pytest.mark.parametrize('vertex,types,expected_nbr,expected_weight', valid_topk_params, ids=valid_topk_ids) def test_valid_topk_neighbor_without_weight(prepare_tf_env, vertex, types, expected_nbr, expected_weight): vertex_tensor = tf.constant(vertex, dtype=tf.int64) types_tensor = tf.constant(types, dtype=tf.uint8) count = 1 is_weight = False neighbors = ops.get_topk_neighbors(vertex_tensor, types_tensor, count, is_weight) assert numpy_equal([len(vertex), 1], tf.shape(neighbors[0]).numpy()) assert numpy_equal(expected_nbr, tf.reshape(neighbors[0], [-1]).numpy()) @pytest.mark.parametrize('vertex,types', invalid_params, ids=invalid_ids) def test_invalid_topk_neighbor_without_weight(prepare_tf_env, vertex, types): count = 2 is_weight = False vertex_tensor = tf.constant(vertex, dtype=tf.int64) types_tensor = tf.constant(types, dtype=tf.uint8) with pytest.raises(Exception): ops.get_topk_neighbors(vertex_tensor, types_tensor, count, is_weight) @pytest.mark.parametrize('vertex,types,expected_nbr,expected_weight', valid_params, ids=valid_ids) def test_valid_full_neighbor_with_weight(prepare_tf_env, vertex, types, expected_nbr, expected_weight): is_weight = True neighbors = ops.get_full_neighbors(vertex, types, is_weight) assert 3 == len(neighbors) assert numpy_equal_unique(expected_nbr, neighbors[0].numpy()) assert numpy_equal_unique(expected_weight, neighbors[1].numpy()) @pytest.mark.parametrize('vertex,types', invalid_params, ids=invalid_ids) def test_invalid_full_neighbor_with_weight(prepare_tf_env, vertex, types): vertex_tensor = tf.constant(vertex, dtype=tf.int64) types_tensor = tf.constant(types, dtype=tf.uint8) is_weight = True with pytest.raises(Exception): ops.get_full_neighbors(vertex_tensor, types_tensor, is_weight) 
@pytest.mark.parametrize('vertex,types,expected_nbr,expected_weight', valid_params, ids=valid_ids) def test_valid_full_neighbor_without_weight(prepare_tf_env, vertex, types, expected_nbr, expected_weight): vertex_tensor = tf.constant(vertex, dtype=tf.int64) types_tensor = tf.constant(types, dtype=tf.uint8) is_weight = False neighbors = ops.get_full_neighbors(vertex_tensor, types_tensor, is_weight) assert 2 == len(neighbors) assert numpy_equal_unique(expected_nbr, neighbors[0].numpy()) @pytest.mark.parametrize('vertex,types', invalid_params, ids=invalid_ids) def test_invalid_full_neighbor_without_weight(prepare_tf_env, vertex, types): vertex_tensor = tf.constant(vertex, dtype=tf.int64) types_tensor = tf.constant(types, dtype=tf.uint8) is_weight = False with pytest.raises(Exception): ops.get_full_neighbors(vertex_tensor, types_tensor, is_weight) def test_empty_inputs(prepare_tf_env): vertex_tensor = tf.constant(1, dtype=tf.int64, shape=(0, )) types_tensor = tf.constant([0], dtype=tf.uint8) res = ops.sample_neighbors(vertex_tensor, types_tensor, 5, False) assert 1 == len(res) assert res[0].dtype == tf.int64 assert res[0].shape[0] == 0 assert res[0].shape[1] == 5 res = ops.get_topk_neighbors(vertex_tensor, types_tensor, 2, False) assert 1 == len(res) assert res[0].dtype == tf.int64 assert res[0].shape[0] == 0 assert res[0].shape[1] == 2 res = ops.get_full_neighbors(vertex_tensor, types_tensor, False) assert 2 == len(res) assert res[0].dtype == tf.int64 assert res[0].shape[0] == 0 assert res[1].dtype == tf.int32 assert res[1].shape[0] == 0 assert res[1].shape[1] == 2 res = ops.sample_neighbors(vertex_tensor, types_tensor, 5, True) assert 2 == len(res) assert res[0].dtype == tf.int64 assert res[0].shape[0] == 0 assert res[0].shape[1] == 5 assert res[1].dtype == tf.float32 assert res[1].shape[0] == 0 assert res[1].shape[1] == 5 res = ops.get_topk_neighbors(vertex_tensor, types_tensor, 2, True) assert 2 == len(res) assert res[0].dtype == tf.int64 assert res[0].shape[0] == 
0 assert res[0].shape[1] == 2 assert res[1].dtype == tf.float32 assert res[1].shape[0] == 0 assert res[1].shape[1] == 2 res = ops.get_full_neighbors(vertex_tensor, types_tensor, True) assert 3 == len(res) assert res[0].dtype == tf.int64 assert res[0].shape[0] == 0 assert res[1].dtype == tf.float32 assert res[1].shape[0] == 0 assert res[2].dtype == tf.int32 assert res[2].shape[0] == 0 assert res[2].shape[1] == 2
40.565957
80
0.664009
1,300
9,533
4.65
0.108462
0.044665
0.042349
0.03871
0.823987
0.810753
0.787428
0.775517
0.758809
0.706038
0
0.045352
0.206651
9,533
234
81
40.739316
0.753934
0.069863
0
0.622951
0
0
0.045639
0.02779
0
0
0
0
0.289617
1
0.071038
false
0
0.032787
0
0.103825
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
9178cf2ff8c985edcc7730680e5fd14e612ee056
39
py
Python
winch/__init__.py
dragonrobotics/2018-PowerUp
0fb6be22420b1488ca3d6abb04588e8564d768b9
[ "MIT" ]
2
2018-02-08T23:29:21.000Z
2018-12-27T22:45:12.000Z
winch/__init__.py
dragonrobotics/2018-PowerUp
0fb6be22420b1488ca3d6abb04588e8564d768b9
[ "MIT" ]
2
2018-02-10T20:25:16.000Z
2018-02-20T12:47:33.000Z
winch/__init__.py
dragonrobotics/2018-PowerUp
0fb6be22420b1488ca3d6abb04588e8564d768b9
[ "MIT" ]
8
2018-01-15T14:53:52.000Z
2018-02-14T22:34:30.000Z
from .winch import Winch # noqa: F401
19.5
38
0.717949
6
39
4.666667
0.833333
0
0
0
0
0
0
0
0
0
0
0.096774
0.205128
39
1
39
39
0.806452
0.25641
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
917cbf26fe31c646cac2e38d3520b360dfd7abe7
150
py
Python
01/HelloWorld.py
whuang022ai/AI-Tutorial-Python-Data-Science-Basic
63689f8d1b2f2b4eedfcc913f7d61f17260f4713
[ "Apache-2.0" ]
1
2019-02-16T07:05:00.000Z
2019-02-16T07:05:00.000Z
01/HelloWorld.py
whuang022ai/AI-Tutorial-Python-Data-Science-Basic
63689f8d1b2f2b4eedfcc913f7d61f17260f4713
[ "Apache-2.0" ]
null
null
null
01/HelloWorld.py
whuang022ai/AI-Tutorial-Python-Data-Science-Basic
63689f8d1b2f2b4eedfcc913f7d61f17260f4713
[ "Apache-2.0" ]
1
2019-02-16T07:18:22.000Z
2019-02-16T07:18:22.000Z
print('Hello World') # print the 'Hello World\n' py3 print('Hello ', end="") # print the 'Hello ' py3 print('World ', end="") # print the 'World' py3
37.5
52
0.633333
23
150
4.130435
0.304348
0.252632
0.273684
0
0
0
0
0
0
0
0
0.024
0.166667
150
4
53
37.5
0.736
0.493333
0
0
0
0
0.319444
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
917fcfc1ea632a8fac6b190edc4a018a7ea98bb9
331
py
Python
rasa_addons/core/channels/__init__.py
Amirali-Shirkh/rasa-for-botfront
36aa24ad31241c5d1a180bbe34e1c8c50da40ff7
[ "Apache-2.0" ]
90
2018-04-11T11:54:57.000Z
2019-05-26T09:52:40.000Z
rasa_addons/core/channels/__init__.py
Amirali-Shirkh/rasa-for-botfront
36aa24ad31241c5d1a180bbe34e1c8c50da40ff7
[ "Apache-2.0" ]
25
2018-05-28T11:08:58.000Z
2019-04-07T15:12:45.000Z
rasa_addons/core/channels/__init__.py
Amirali-Shirkh/rasa-for-botfront
36aa24ad31241c5d1a180bbe34e1c8c50da40ff7
[ "Apache-2.0" ]
22
2018-06-06T19:16:42.000Z
2019-05-31T14:36:28.000Z
from rasa_addons.core.channels.facebook_messenger import FBMessengerInput from rasa_addons.core.channels.rest import BotfrontRestInput from rasa_addons.core.channels.rest_plus import BotfrontRestPlusInput from rasa_addons.core.channels.webchat import WebchatInput from rasa_addons.core.channels.webchat_plus import WebchatPlusInput
66.2
73
0.897281
43
331
6.72093
0.372093
0.138408
0.242215
0.311419
0.525952
0.435986
0
0
0
0
0
0
0.057402
331
5
74
66.2
0.926282
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
91b7a317e5857e6924cf9bca4e03635a5a450952
202
py
Python
Chapter 12/ch12_2.py
bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE
f6a4194684515495d00aa38347a725dd08f39a0c
[ "MIT" ]
null
null
null
Chapter 12/ch12_2.py
bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE
f6a4194684515495d00aa38347a725dd08f39a0c
[ "MIT" ]
null
null
null
Chapter 12/ch12_2.py
bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE
f6a4194684515495d00aa38347a725dd08f39a0c
[ "MIT" ]
null
null
null
class Human: def __init__(self, name, age): self.name = name self.salary = age def display(self): print ("Name : ", self.name, ", Age: ", self.age) e1 = Human("Ashu", 30) e1.display()
20.2
52
0.594059
29
202
4
0.448276
0.206897
0.189655
0.258621
0
0
0
0
0
0
0
0.025641
0.227723
202
9
53
22.444444
0.717949
0
0
0
0
0
0.093264
0
0
0
0
0
0
1
0.25
false
0
0
0
0.375
0.125
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
6
91c486f7c7e1bfa6247d1a4d7f3024a5e55ba585
3,076
py
Python
deepfence_backend/utils/es_query_utils.py
riddopic/ThreatMapper
59bf7b5198b7fad609c78abfaf34887ef73ea652
[ "Apache-2.0" ]
null
null
null
deepfence_backend/utils/es_query_utils.py
riddopic/ThreatMapper
59bf7b5198b7fad609c78abfaf34887ef73ea652
[ "Apache-2.0" ]
null
null
null
deepfence_backend/utils/es_query_utils.py
riddopic/ThreatMapper
59bf7b5198b7fad609c78abfaf34887ef73ea652
[ "Apache-2.0" ]
null
null
null
from utils.constants import CVE_INDEX, ES_TERMS_AGGR_SIZE, SECRET_SCAN_INDEX, CVE_ES_TYPE from utils.esconn import ESConn def get_latest_cve_scan_id(): # Deprecated: use utils.helper.get_recent_scan_ids aggs = { "cve_container_image": { "terms": { "field": "cve_container_image.keyword", "size": ES_TERMS_AGGR_SIZE }, "aggs": { "scan_id": { "terms": { "field": "scan_id.keyword", "size": ES_TERMS_AGGR_SIZE }, "aggs": { "scan_recent_timestamp": { "max": { "field": "@timestamp" } } } } } } } aggs_response = ESConn.aggregation_helper( CVE_INDEX, {"type": CVE_ES_TYPE}, aggs, add_masked_filter=False, ) scan_ids = [] if "aggregations" in aggs_response: for image_aggr in aggs_response["aggregations"]["cve_container_image"]["buckets"]: latest_scan_id = "" latest_scan_time = 0 for scan_id_aggr in image_aggr["scan_id"]["buckets"]: if scan_id_aggr["scan_recent_timestamp"]["value"] > latest_scan_time: latest_scan_time = scan_id_aggr["scan_recent_timestamp"]["value"] latest_scan_id = scan_id_aggr["key"] scan_ids.append(latest_scan_id) return scan_ids def get_latest_secret_scan_id(): # Deprecated: use utils.helper.get_recent_scan_ids aggs = { "node_id": { "terms": { "field": "node_id.keyword", "size": ES_TERMS_AGGR_SIZE }, "aggs": { "scan_id": { "terms": { "field": "scan_id.keyword", "size": ES_TERMS_AGGR_SIZE }, "aggs": { "scan_recent_timestamp": { "max": { "field": "@timestamp" } } } } } } } aggs_response = ESConn.aggregation_helper( SECRET_SCAN_INDEX, {}, aggs, add_masked_filter=False, ) scan_ids = [] if "aggregations" in aggs_response: for image_aggr in aggs_response["aggregations"]["node_id"]["buckets"]: latest_scan_id = "" latest_scan_time = 0 for scan_id_aggr in image_aggr["scan_id"]["buckets"]: if scan_id_aggr["scan_recent_timestamp"]["value"] > latest_scan_time: latest_scan_time = scan_id_aggr["scan_recent_timestamp"]["value"] latest_scan_id = scan_id_aggr["key"] scan_ids.append(latest_scan_id) return scan_ids
33.802198
90
0.470741
289
3,076
4.567474
0.16609
0.1
0.060606
0.056818
0.807576
0.807576
0.807576
0.807576
0.807576
0.806061
0
0.001142
0.430429
3,076
90
91
34.177778
0.752283
0.031534
0
0.595238
0
0
0.165323
0.051411
0
0
0
0
0
1
0.02381
false
0
0.02381
0
0.071429
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
91dbdce6fce8c76018752a14e02e9ae88c1c6196
219
py
Python
ginga/canvas/mixins.py
Cadair/ginga
5afdd8824f27c7ae7d8d82b5013b0ff0068bd8b8
[ "BSD-3-Clause" ]
null
null
null
ginga/canvas/mixins.py
Cadair/ginga
5afdd8824f27c7ae7d8d82b5013b0ff0068bd8b8
[ "BSD-3-Clause" ]
20
2021-05-03T18:02:23.000Z
2022-03-12T12:01:04.000Z
Lib/site-packages/ginga/canvas/mixins.py
fochoao/cpython
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
[ "bzip2-1.0.6", "0BSD" ]
null
null
null
# # convenience module for importing canvas mixin classes # from ginga.canvas.CompoundMixin import CompoundMixin from ginga.canvas.CanvasMixin import CanvasMixin from ginga.canvas.DrawingMixin import DrawingMixin #END
24.333333
55
0.844749
26
219
7.115385
0.538462
0.145946
0.243243
0
0
0
0
0
0
0
0
0
0.109589
219
8
56
27.375
0.948718
0.255708
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
37fe2fc5682d1e6f2c7bfc6771aaff6b6b910385
67
py
Python
b_cfn_opensearch_index_tests/integration/fixtures/__init__.py
gkazla/B.CfnOpenSearchIndex
4d40fc8beff8a719729994e8e626b3dfca08eaf5
[ "Apache-2.0" ]
null
null
null
b_cfn_opensearch_index_tests/integration/fixtures/__init__.py
gkazla/B.CfnOpenSearchIndex
4d40fc8beff8a719729994e8e626b3dfca08eaf5
[ "Apache-2.0" ]
null
null
null
b_cfn_opensearch_index_tests/integration/fixtures/__init__.py
gkazla/B.CfnOpenSearchIndex
4d40fc8beff8a719729994e8e626b3dfca08eaf5
[ "Apache-2.0" ]
null
null
null
from .opensearch_client import * from .generate_documents import *
22.333333
33
0.820896
8
67
6.625
0.75
0
0
0
0
0
0
0
0
0
0
0
0.119403
67
2
34
33.5
0.898305
0
0
0
1
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
534cc902393cfadea68951b6575369f7c35b02de
71
py
Python
backend/movieassets/helpers.py
lahim/interview-starwars
643eb0ad147faef1faf02ab2a88e130e4f3dcbaa
[ "MIT" ]
null
null
null
backend/movieassets/helpers.py
lahim/interview-starwars
643eb0ad147faef1faf02ab2a88e130e4f3dcbaa
[ "MIT" ]
null
null
null
backend/movieassets/helpers.py
lahim/interview-starwars
643eb0ad147faef1faf02ab2a88e130e4f3dcbaa
[ "MIT" ]
null
null
null
from pathlib import Path def path(p: Path) -> str: return str(p)
11.833333
25
0.647887
12
71
3.833333
0.666667
0
0
0
0
0
0
0
0
0
0
0
0.239437
71
5
26
14.2
0.851852
0
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0.333333
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
1
1
0
0
6
5360f245bf682a86ca6026138a96861cebe1efd5
25
py
Python
mcdb/__main__.py
Arcensoth/mcdb
ad0b9422235027c0062512c26f2c18fb7c14fcf5
[ "MIT" ]
1
2018-09-23T05:16:59.000Z
2018-09-23T05:16:59.000Z
mcdb/__main__.py
Arcensoth/mcdb
ad0b9422235027c0062512c26f2c18fb7c14fcf5
[ "MIT" ]
null
null
null
mcdb/__main__.py
Arcensoth/mcdb
ad0b9422235027c0062512c26f2c18fb7c14fcf5
[ "MIT" ]
null
null
null
from mcdb.cli import cli
12.5
24
0.8
5
25
4
0.8
0
0
0
0
0
0
0
0
0
0
0
0.16
25
1
25
25
0.952381
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
727ad311418c82707445d8624f9760e58e320a57
4,094
py
Python
tests/unittests/test_postgresqlpoll.py
novaweb-mobi/nova-api
2887118ff10d18f366ce661262bd25bb96648470
[ "MIT" ]
3
2020-09-08T23:33:41.000Z
2021-12-24T20:50:13.000Z
tests/unittests/test_postgresqlpoll.py
novaweb-mobi/nova-api
2887118ff10d18f366ce661262bd25bb96648470
[ "MIT" ]
39
2020-07-29T12:34:14.000Z
2022-03-05T16:50:29.000Z
tests/unittests/test_postgresqlpoll.py
novaweb-mobi/nova-api
2887118ff10d18f366ce661262bd25bb96648470
[ "MIT" ]
1
2021-03-05T19:41:58.000Z
2021-03-05T19:41:58.000Z
from mock import call from pytest import fixture from nova_api.persistence.postgresql_pool import PostgreSQLPool class TestPostgreSQLPoll: @fixture def pooling_mock(self, mocker): return mocker.patch('nova_api.persistence.postgresql_pool.pool') def test_get_instance_not_exist(self, pooling_mock): mock = pooling_mock.SimpleConnectionPool PostgreSQLPool.get_instance(host="test_host", user="test_user", password="test_passwd", database="test_db") assert mock.mock_calls == [call( maxconn=5, minconn=5, host='test_host', database='test_db', user='test_user', password='test_passwd')] def test_get_instance_wrong_chars(self, pooling_mock): PostgreSQLPool.get_instance(host="test_host", user="test_user@test_host", password="test_passwd", database="test_db") assert pooling_mock.mock_calls == [call.SimpleConnectionPool( maxconn=5, minconn=5, host='test_host', database='test_db', user='test_user@test_host', password='test_passwd')] def test_get_instance_too_loong(self, pooling_mock): PostgreSQLPool.get_instance( host="test_hosthosthosthosthosthosthosthosthos" "thosthosthosthosthosthosthost", user="test_user@test_host", password="test_passwd", database="test_db") assert pooling_mock.mock_calls == [call.SimpleConnectionPool( maxconn=5, minconn=5, host="test_hosthosthosthosthosthosthosthosthos" "thosthosthosthosthosthosthost", database='test_db', user='test_user@test_host', password='test_passwd')] def test_get_instance_exist_extra_args(self, pooling_mock): PostgreSQLPool.get_instance(host="test_host", user="test_user", password="test_passwd", database="test_db", database_args={"ssl_ca": "file"}) assert pooling_mock.mock_calls == [] def test_get_instance_not_exist_extra_args(self, pooling_mock): PostgreSQLPool.get_instance(host="test_host2", user="test_user", password="test_passwd", database="test_db", database_args={"ssl_ca": "file"}) assert pooling_mock.mock_calls == [call.SimpleConnectionPool( maxconn=5, minconn=5, host='test_host2', database='test_db', user='test_user', 
password='test_passwd', ssl_ca='file')] def test_get_instance_exist(self, pooling_mock): inst_1 = PostgreSQLPool.get_instance(host="test_host", user="test_user", password="test_passwd", database="test_db") inst_2 = PostgreSQLPool.get_instance(host="test_host", user="test_user", password="test_passwd", database="test_db") assert inst_1 is not None assert inst_1 == inst_2 def test_get_instance_different(self, pooling_mock): inst_1 = PostgreSQLPool.get_instance(host="test_host", user="test_user", password="test_passwd", database="test_db") inst_2 = PostgreSQLPool.get_instance(host="test_host", user="test_user", password="test_passwd", database="test_db2") assert inst_1 is not None and inst_2 is not None assert inst_1 != inst_2
43.553191
79
0.538349
390
4,094
5.312821
0.146154
0.065637
0.07529
0.125965
0.802124
0.760135
0.725386
0.716699
0.669402
0.639479
0
0.008587
0.374206
4,094
93
80
44.021505
0.800156
0
0
0.658537
0
0
0.170005
0.043723
0
0
0
0
0.109756
1
0.097561
false
0.158537
0.036585
0.012195
0.158537
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
6
727b78b3d59268fa33d22bcc539dbb030dfc6fe1
1,648
py
Python
python/_207.py
alanlou/leetcode-solutions
42e87a50748b481209fde4b79d5d65169a425507
[ "MIT" ]
3
2021-01-03T01:17:18.000Z
2021-12-15T06:17:15.000Z
python/_207.py
alanlou/leetcode-solutions
42e87a50748b481209fde4b79d5d65169a425507
[ "MIT" ]
null
null
null
python/_207.py
alanlou/leetcode-solutions
42e87a50748b481209fde4b79d5d65169a425507
[ "MIT" ]
1
2021-09-29T11:07:23.000Z
2021-09-29T11:07:23.000Z
# Solution 1. topo sort using BFS # O(N) / O(N) class Solution: def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool: # construct graph graph = {i: [] for i in range(numCourses)} in_degree = {i: 0 for i in range(numCourses)} for u, v in prerequisites: graph[v].append(u) in_degree[u] += 1 # find all sources sources = [i for i in range(numCourses) if in_degree[i] == 0] # bfs while sources: nxt_sources = [] for cur in sources: for nxt in graph[cur]: in_degree[nxt] -= 1 if in_degree[nxt] == 0: nxt_sources.append(nxt) sources = nxt_sources return all(v == 0 for v in in_degree.values()) # Solution 2. topo sort using DFS # O(N) / O(N) class Solution: def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool: # construct graph graph = {i: [] for i in range(numCourses)} in_degree = {i: 0 for i in range(numCourses)} for u, v in prerequisites: graph[v].append(u) in_degree[u] += 1 # find all sources sources = [i for i in range(numCourses) if in_degree[i] == 0] # dfs while sources: cur = sources.pop() for nxt in graph[cur]: in_degree[nxt] -= 1 if in_degree[nxt] == 0: sources.append(nxt) return all(v == 0 for v in in_degree.values())
32.96
81
0.496966
209
1,648
3.84689
0.191388
0.119403
0.044776
0.08209
0.788557
0.788557
0.788557
0.788557
0.788557
0.788557
0
0.014113
0.398058
1,648
50
82
32.96
0.796371
0.097694
0
0.8125
0
0
0
0
0
0
0
0
0
1
0.0625
false
0
0
0
0.1875
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
72840d2036585d7b83b2266ffabf1bb5ba349211
1,693
py
Python
built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/evaluate_service/config.py
Ascend/modelzoo
f018cfed33dbb1cc2110b9ea2e233333f71cc509
[ "Apache-2.0" ]
12
2020-12-13T08:34:24.000Z
2022-03-20T15:17:17.000Z
built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/evaluate_service/config.py
Ascend/modelzoo
f018cfed33dbb1cc2110b9ea2e233333f71cc509
[ "Apache-2.0" ]
1
2022-01-20T03:11:05.000Z
2022-01-20T06:53:39.000Z
built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/evaluate_service/config.py
Ascend/modelzoo
f018cfed33dbb1cc2110b9ea2e233333f71cc509
[ "Apache-2.0" ]
2
2021-07-10T12:40:46.000Z
2021-12-17T07:55:15.000Z
# coding=utf-8 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ # Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """The config of the evaluate service.""" davinci_environment_type = "ATLAS200DK" # Evb , ATLAS200DK or Atlas300 ddk_host_ip = "192.168.0.1" listen_port = 8888 # if your environment_type is ATLAS200DK, the following parameters should be configed, if not, just ignore it ddk_user_name = "ly" atlas_host_ip = "192.168.0.2"
43.410256
109
0.692853
241
1,693
4.825726
0.439834
0.103181
0.044712
0.05503
0.737747
0.715391
0.715391
0.715391
0.715391
0.715391
0
0.034028
0.149439
1,693
38
110
44.552632
0.773611
0.876551
0
0
0
0
0.196532
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
72a97bd2720347410531d7069722789dc608abc2
1,691
py
Python
pyairwatch/mdm/mdm.py
llxp/PyVMwareAirWatch
5953f3f21b0fece20f2ec027fef42d8a3eb29de1
[ "MIT" ]
2
2021-04-20T03:41:03.000Z
2021-09-23T10:56:11.000Z
pyairwatch/mdm/mdm.py
llxp/PyVMwareAirWatch
5953f3f21b0fece20f2ec027fef42d8a3eb29de1
[ "MIT" ]
null
null
null
pyairwatch/mdm/mdm.py
llxp/PyVMwareAirWatch
5953f3f21b0fece20f2ec027fef42d8a3eb29de1
[ "MIT" ]
1
2021-09-23T12:45:25.000Z
2021-09-23T12:45:25.000Z
class MDM(object): """ Base MDM class """ def __init__(self, client): self.client = client def _get(self, module='mdm', path=None, version=None, params=None, header=None): """GET requests for base mdm endpoints""" return self.client.get(module=module, path=path, version=version, params=params, header=header) def _post(self, module='mdm', path=None, version=None, params=None, data=None, json=None, header=None): """POST requests for base mdm endpoints""" return self.client.post(module=module, path=path, version=version, params=params, data=data, json=json, header=header) def _put(self, module='mdm', path=None, version=None, params=None, data=None, json=None, header=None): """PUT requests for base mdm endpoints""" return self.client.put(module=module, path=path, version=version, params=params, data=data, json=json, header=header) def _patch(self, module='mdm', path=None, version=None, params=None, data=None, json=None, header=None): """Patch requests for base mdm endpoints""" return self.client.patch(module=module, path=path, version=version, params=params, data=data, json=json, header=header) def _delete(self, module='MDM', path=None, version=None, params=None, header=None): return self.client.delete(module=module, path=path, version=version, params=params, header=header)
44.5
106
0.575991
196
1,691
4.923469
0.127551
0.072539
0.067358
0.088083
0.856995
0.856995
0.856995
0.856995
0.678756
0.678756
0
0
0.303962
1,691
37
107
45.702703
0.819881
0.09521
0
0.375
0
0
0.01004
0
0
0
0
0
0
1
0.25
false
0
0
0.041667
0.5
0
0
0
0
null
0
0
0
1
1
1
1
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
6
f4143afc129936c009fe216fb380b9d039ffc205
4,072
py
Python
Example_Cases/Evac_Stairs/Scripts/evac_flow_three_parameter_models.py
koverholt/bayes-fire
4333cdf7b93bf77d8e021f0c4a1931a77056534d
[ "BSD-3-Clause" ]
6
2016-06-19T12:44:22.000Z
2021-12-21T07:01:38.000Z
Example_Cases/Evac_Stairs/Scripts/evac_flow_three_parameter_models.py
koverholt/bayes-fire
4333cdf7b93bf77d8e021f0c4a1931a77056534d
[ "BSD-3-Clause" ]
null
null
null
Example_Cases/Evac_Stairs/Scripts/evac_flow_three_parameter_models.py
koverholt/bayes-fire
4333cdf7b93bf77d8e021f0c4a1931a77056534d
[ "BSD-3-Clause" ]
2
2017-10-15T02:37:25.000Z
2022-03-04T16:22:44.000Z
#!/usr/bin/env python """Module for setting up statistical models""" from __future__ import division import numpy as np import pymc as mc import evac_flow_three_parameter_graphics as graphics import data_evac def model1(): """ PyMC configuration with Model 1. """ # Priors theta = mc.Uniform('theta', lower=[-30, -30, -30, -30], upper=[ 30, 30, 30, 30], value=[ 15, 15, 15, 15]) sigma = mc.Uniform('sigma', lower=0., upper=100., value=1.) # Model @mc.deterministic def y_mean(theta=theta, occupants=data_evac.data_three_parameter['occupants'], exit_distance=data_evac.data_three_parameter['exit_distance'], type=data_evac.data_three_parameter['type']): return (theta[0] * occupants + theta[1] * exit_distance + theta[2] * type + theta[3]) # Likelihood # The likelihood is N(y_mean, sigma^2), where sigma # is pulled from a uniform distribution. y_obs = mc.Normal('y_obs', value=data_evac.data_three_parameter['pre_evac_int'], mu=y_mean, tau=sigma**-2, observed=True) return vars() def model2(): """ PyMC configuration with Model 2. """ # Priors theta = mc.Uniform('theta', lower=[-30, -30, -30, -30, -30, -30], upper=[ 30, 30, 30, 30, 30, 30], value=[ 15, 15, 15, 15, 15, 15]) sigma = mc.Uniform('sigma', lower=0., upper=100., value=1.) # Model @mc.deterministic def y_mean(theta=theta, occupants=data_evac.data_three_parameter['occupants'], exit_distance=data_evac.data_three_parameter['exit_distance'], type=data_evac.data_three_parameter['type'], riser=data_evac.data_three_parameter['riser'], tread=data_evac.data_three_parameter['tread']): return (theta[0] * occupants + theta[1] * exit_distance + theta[2] * type + theta[3] * riser + theta[4] * tread + theta[5]) # Likelihood # The likelihood is N(y_mean, sigma^2), where sigma # is pulled from a uniform distribution. y_obs = mc.Normal('y_obs', value=data_evac.data_three_parameter['travel_int'], mu=y_mean, tau=sigma**-2, observed=True) return vars() def model3(): """ PyMC configuration with Model 3. 
""" # Priors theta = mc.Uniform('theta', lower=[-30, -30, -30, -30, -30, -30, -30], upper=[ 30, 30, 30, 30, 30, 30, 30], value=[ 15, 15, 15, 15, 15, 15, 15]) sigma = mc.Uniform('sigma', lower=0., upper=100., value=1.) # Model @mc.deterministic def y_mean(theta=theta, occupants=data_evac.data_three_parameter['occupants'], exit_distance=data_evac.data_three_parameter['exit_distance'], type=data_evac.data_three_parameter['type'], riser=data_evac.data_three_parameter['riser'], tread=data_evac.data_three_parameter['tread'], evac_chair=data_evac.data_three_parameter['evac_chair']): return (theta[0] * occupants + theta[1] * exit_distance + theta[2] * type + theta[3] * riser + theta[4] * tread + theta[5] * evac_chair + theta[6]) # Likelihood # The likelihood is N(y_mean, sigma^2), where sigma # is pulled from a uniform distribution. y_obs = mc.Normal('y_obs', value=data_evac.data_three_parameter['exit_int'], mu=y_mean, tau=sigma**-2, observed=True) return vars()
33.105691
77
0.522839
468
4,072
4.354701
0.166667
0.054956
0.064769
0.141806
0.847399
0.834642
0.832679
0.832679
0.832679
0.823847
0
0.055789
0.357318
4,072
122
78
33.377049
0.722965
0.122544
0
0.641026
0
0
0.052181
0
0
0
0
0
0
1
0.076923
false
0
0.064103
0.038462
0.217949
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
f42e222544a5922ec0bd84ade388663fcb6d9cfd
34
py
Python
pystaffo/__init__.py
Lcapelli/PyStaffo
f115d4002f30c3a1802069f36b5c0616da78fe8b
[ "MIT" ]
2
2018-08-28T16:33:28.000Z
2022-03-03T03:24:22.000Z
pystaffo/__init__.py
Lcapelli/PyStaffo
f115d4002f30c3a1802069f36b5c0616da78fe8b
[ "MIT" ]
null
null
null
pystaffo/__init__.py
Lcapelli/PyStaffo
f115d4002f30c3a1802069f36b5c0616da78fe8b
[ "MIT" ]
2
2019-06-10T08:42:30.000Z
2019-06-13T08:59:20.000Z
from .staffo import StaffoAccount
17
33
0.852941
4
34
7.25
1
0
0
0
0
0
0
0
0
0
0
0
0.117647
34
1
34
34
0.966667
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
f46c339c9abbc490bf93eaede0de1fe1c9072c93
71
py
Python
networks/__init__.py
wonkyunglee/MPNet
3a6821a88a5e3db5bd97121761dbb361d9518bc2
[ "MIT" ]
null
null
null
networks/__init__.py
wonkyunglee/MPNet
3a6821a88a5e3db5bd97121761dbb361d9518bc2
[ "MIT" ]
null
null
null
networks/__init__.py
wonkyunglee/MPNet
3a6821a88a5e3db5bd97121761dbb361d9518bc2
[ "MIT" ]
null
null
null
import networks.deeplabv3 import networks.pspnet import networks.mpnet
17.75
25
0.873239
9
71
6.888889
0.555556
0.677419
0
0
0
0
0
0
0
0
0
0.015385
0.084507
71
3
26
23.666667
0.938462
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
be3ec3f998bc09640067576984730341926800be
45
py
Python
webapp/api/__init__.py
flyneat/microblog
5703d20355bd2b596f1397fa26d7058aa2705e60
[ "Apache-2.0" ]
null
null
null
webapp/api/__init__.py
flyneat/microblog
5703d20355bd2b596f1397fa26d7058aa2705e60
[ "Apache-2.0" ]
null
null
null
webapp/api/__init__.py
flyneat/microblog
5703d20355bd2b596f1397fa26d7058aa2705e60
[ "Apache-2.0" ]
null
null
null
from webapp.api import instruction, postfile
22.5
44
0.844444
6
45
6.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.111111
45
1
45
45
0.95
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
be87079cbf8c5a8bdf2af2fea0c3581e906c3914
4,472
py
Python
src/dcan/ScoreTest.py
DCAN-Labs/score-conners-3
ad59627d4418491306c09830f76d1112d5dc83c3
[ "BSD-3-Clause" ]
null
null
null
src/dcan/ScoreTest.py
DCAN-Labs/score-conners-3
ad59627d4418491306c09830f76d1112d5dc83c3
[ "BSD-3-Clause" ]
null
null
null
src/dcan/ScoreTest.py
DCAN-Labs/score-conners-3
ad59627d4418491306c09830f76d1112d5dc83c3
[ "BSD-3-Clause" ]
null
null
null
import unittest import pandas as pd from dcan.Score import do_total_scoring, get_t_score_from_raw_score class ScoreTest(unittest.TestCase): def test_do_total_scoring(self): parents_score_file = "data/sample/inputdata_conners3parent.csv" actual_results = do_total_scoring(parents_score_file, 9, "male", 'parent') expected_results = \ {'LP': (7, 57), 'PR': (0, 44), 'IN': (4, 45), 'AG': (0, 43), 'HY': (1, 40), 'EF': (6, 48), 'PI': 1, 'NI': 3} expected_keys = expected_results.keys() self.assertEqual(len(expected_keys), len(actual_results.keys())) for key in expected_keys: self.assertEqual(expected_results[key], actual_results[key]) def test_do_total_scoring_female_10(self): parents_score_file = "data/sample/inputdata_Conners3parent_female10.csv" actual_results = do_total_scoring(parents_score_file, 10, "female", 'parent') expected_results = {'LP': (10, 64), 'PR': (10, 90), 'IN': (21, 82), 'AG': (9, 81), 'HY': (12, 67), 'EF': (21, 90), 'PI': 0, 'NI': 5} # IN (21, 82), HY (12, 67), LP (10, 64), EF (21, 90), AG (9, 81), PR (10, 90) expected_keys = expected_results.keys() self.assertEqual(len(expected_keys), len(actual_results.keys())) for key in expected_keys: self.assertEqual(expected_results[key], actual_results[key]) def test_do_total_scoring_teacher_female_10(self): parents_score_file = "data/sample/sub1000701_inputdata_teacher_female_10.csv" actual_results = do_total_scoring(parents_score_file, 10, "female", 'teacher') # (IN: 2, 45) (HY: 2, 45) (LE: 4, 44) (AG: 1, 50) (PR: 0, 44) expected_results = \ {'IN': (3, 47), 'HY': (2, 45), 'LE': (3, 43), 'AG': (0, 45), 'PR': (0, 44), 'PI': 2, 'NI': 0} expected_keys = expected_results.keys() self.assertEqual(len(expected_keys), len(actual_results.keys())) for key in expected_keys: self.assertEqual(expected_results[key], actual_results[key]) def test_do_total_scoring_teacher_male_9(self): parents_score_file = "data/sample/sub1000201_inputdata_teacher_9_male.csv" actual_results = do_total_scoring(parents_score_file, 9, "male", 
'teacher') # (IN: 0, 42) (HY 0, 41) (LE: 5, 45) (AG: 0, 44) (PR: 0, 43) expected_results = \ {'IN': (0, 42), 'HY': (0, 41), 'LE': (5, 45), 'AG': (0, 44), 'PR': (0, 43), 'PI': 4, 'NI': 0} expected_keys = expected_results.keys() self.assertEqual(len(expected_keys), len(actual_results.keys())) for key in expected_keys: self.assertEqual(expected_results[key], actual_results[key]) def test_do_total_scoring_male_10(self): parents_score_file = "data/sample/inputdata_Conners3parent_male10.csv" actual_results = do_total_scoring(parents_score_file, 10, "male", 'parent') expected_results = \ {'LP': (10, 64), 'IN': (19, 75), 'AG': (5, 63), 'HY': (29, 89), 'EF': (13, 61), 'PR': (0, 43), 'PI': 0, 'NI': 3} # SUB-10016-01: {'LP': (10, 64), 'IN': (19, 75), 'AG': (5, 63), 'HY': (29, 89), 'EF': (13, 61), 'PR': (0, 43)} expected_keys = expected_results.keys() self.assertEqual(len(expected_keys), len(actual_results.keys())) for key in expected_keys: self.assertEqual(expected_results[key], actual_results[key]) def test_get_t_score_from_raw_score_high(self): raw_score = 23 csv_file = 'data/constant/parent/male_lp.csv' df = pd.read_csv(csv_file) age = 9 age_str = str(age) column_0_name = 'Unnamed: 0' age_column = df[[column_0_name, age_str]] scores_df = age_column.rename(columns={"Unnamed: 0": "t-score", age_str: "raw score"}) t_score = get_t_score_from_raw_score(raw_score, scores_df) self.assertEqual(90, t_score) def test_get_t_score_from_raw_score_low(self): raw_score = 0 csv_file = 'data/constant/parent/male_lp.csv' df = pd.read_csv(csv_file) age = 9 age_str = str(age) column_0_name = 'Unnamed: 0' age_column = df[[column_0_name, age_str]] scores_df = age_column.rename(columns={"Unnamed: 0": "t-score", age_str: "raw score"}) t_score = get_t_score_from_raw_score(raw_score, scores_df) self.assertEqual(40, t_score) if __name__ == '__main__': unittest.main()
50.247191
124
0.618292
643
4,472
4.004666
0.163297
0.075728
0.059806
0.025243
0.816311
0.79767
0.759223
0.759223
0.702524
0.659806
0
0.069533
0.224955
4,472
88
125
50.818182
0.673399
0.067755
0
0.547945
0
0
0.124159
0.073247
0
0
0
0
0.164384
1
0.09589
false
0
0.041096
0
0.150685
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
beb42b0e083330872f607549e756d2eda0676612
149,746
py
Python
likeyoubot_icarus.py
dogfooter-master/dogfooter
e1e39375703fe3019af7976f97c44cf2cb7ca0fa
[ "MIT" ]
null
null
null
likeyoubot_icarus.py
dogfooter-master/dogfooter
e1e39375703fe3019af7976f97c44cf2cb7ca0fa
[ "MIT" ]
null
null
null
likeyoubot_icarus.py
dogfooter-master/dogfooter
e1e39375703fe3019af7976f97c44cf2cb7ca0fa
[ "MIT" ]
null
null
null
import likeyoubot_game as lybgame import likeyoubot_icarus_scene as lybscene from likeyoubot_configure import LYBConstant as lybconstant import time import sys import tkinter from tkinter import ttk from tkinter import font import copy class LYBIcarus(lybgame.LYBGame): work_list = [ '게임 시작', '로그인', '메인 퀘스트', '정예 퀘스트', '자동 사냥', '펠로우 탐험', '펠로우 세트', '몬스터 결정', '가방 정리', '캐릭터 선택', '알림', '[반복 시작]', '[반복 종료]', '[작업 대기]', '[작업 예약]', '' ] icarus_icon_list = [ 'icarus_icon_0', 'icarus_icon_1', 'icarus_icon_1109', ] area_list = [ '브라카르 숲', '하카나스 직할령', '파르나의 땅' ] area_sub_list = [ [ '브라기 본거지', '잊혀진 유적지' ], [ '풍요의 언덕', '켈우즈 역병지대', '신룡의 해안가' ], [ '여신의 땅', '소르마 평원', '파를라크 얼음성채' ] ] quest_area_sub_list = [ [ '엘로라의 신전', '브라기 본거지', '잊혀진 유적지', '코쿤' ], [ '풍요의 언덕', '켈우즈 역병지대', '신룡의 해안가' ], [ '여신의 땅', '소르마 평원', '파를라크 얼음성채' ] ] elite_quest_sub_dic = { area_list[0]: quest_area_sub_list[0], area_list[1]: quest_area_sub_list[1], area_list[2]: quest_area_sub_list[2], } elite_quest_dic = { quest_area_sub_list[0][0] : [ '선택 안함', # 0 ], quest_area_sub_list[0][1] : [ '선택 안함', # 0 '도난당한 물건', '라이노 토벌', '렉스의 무기 조사', '빼앗긴 서신', '여왕의 날개', '요정의 장난', '튼튼한 가죽', ], quest_area_sub_list[0][2] : [ '선택 안함', # 0 '거미의 덫', '교단의 창궐', '남아있는 힘', '박쥐 퇴치', '유령 퇴치', '유행하는 물건', '지팡이 개조', '특별한 가면', '튼튼한 갑옷', '흑마법의 집약체', '힘의 근원 조사', ], quest_area_sub_list[0][3] : [ '선택 안함', # 0 ], quest_area_sub_list[1][0] : [ '선택 안함', # 0 '검은 그림자', '낯가리는 라비니', '내재된 힘 1장', '마을을 지켜라! 1장', '마을을 지켜라! 
2장', '마을의 골칫덩이', '민감한 라비니', '벌집군 붕괴 현상', '사라진 고문서', '사로잡힌 라비니', '영역의 폭군', '이상 증세', '이상한 뾰족귀', '풍요의 눈물', '풍요의 조각', '풍요의 폭군', ], quest_area_sub_list[1][1] : [ '선택 안함', # 0 '내재된 힘 2장', '론도의 주술', '수상한 그림자', '습격받은 캠프', '아버지의 흔적', '안전제일', '연금술 실험', '연금술의 비원', '위험 제거 1장', '위험 제거 2장', '위험 제거 3장', '저주받은 마물', '좀비 토벌', '좀비의 우두머리', '켈우즈의 눈물', '켈우즈의 재앙', '켈우즈의 조각', ], quest_area_sub_list[1][2] : [ '선택 안함', # 0 '내재된 힘 3장', '선제공격', '작은 악마', '전략적 기회', '절벽의 폭탄', '캠프 수비 1장', '캠프 수비 2장', '캠프 수비 3장', '캠프 수비 4장', '하피의 본모습', '해안가의 눈물', '해안가의 마물', '해안가의 저주', '해안가의 조각', '해안가의 폭군', '해적소탕', '행운의 증표', ], quest_area_sub_list[2][0] : [ '선택 안함', # 0 '늑대의 가죽', '몸에 좋은 것', '보수를 해야 해 1장', '보수를 해야 해 2장', '분신 1장', '분신 2장', '야생곰의 꿀', '얼음 폭탄', '염탐꾼의 돋보기', '유지할 수 있는 것', '위험이 없도록', '위험한 길', '의지할 수 있는 것', '주술에 필요한 것', '캠프가 위험해', '통제 불가', ], quest_area_sub_list[2][1] : [ '선택 안함', # 0 '가려진 그림자', '가려진 슬픔', '가려진 진실', '거짓말', '또 다른 단서', '마지막 장식', '부정된 존재', '예비 수호자', '예비 제사장', '인어 알의 행방', '잊혀진 임무', '최고급 솜털', '최고급 무기', '추종자', '평원의 눈물 2장', '평원의 조각 1장', '평원의 폭군 3장', ], quest_area_sub_list[2][2] : [ '선택 안함', # 0 '결박된 자', '그릇된 맹세', '까다로운 상대 1장', '까다로운 상대 2장', '더럽히는 자', '보온에 좋은 재료', '보초병의 애환', '분신의 재료', '성주의 장비', '심연의 손길', '심연의 어둠', '어둠의 힘', '어지럽히는 자', '얼음성의 병사들', '오염된 심장', '이름과 가죽', '일꾼의 비애', '적의 내통법', '적의 소통법', '파를라크 감시자', '혹한의 비명', ], } # quest_scene_elite_캠프가 위험해_loc fellow_sort_list = [ '전체', '추천 순위', '지역', '능력치', ] menu_fellow_sub_list = [ '펠로우 가방', '펠로우 조각', '펠로우 세트', '펠로우 탐험' ] menu_seongjang_sub_list = [ '영혼석', '교본 연구', '몬스터 결정', '축복' ] fellow_set_list = [ '설정 안함', '속성 저항력', '시공의 틈', '파괴자의 습격', '주신의 길', '사냥', '기타', ] character_move_list = [ "↑", "↗", "→", "↘", "↓", "↙", "←", "↖" ] item_catalog_list = [ '무기', '방어구', '장신구' ] item_rank_list = [ '일반', '정예', '희귀', '영웅', '전설', '주신', '특별' ] item_gakseong_list = [ '불가', '1단계', '2단계', '3단계' ] fellow_catalog_list = [ '펠로우', '소모품', '재료' ] fellow_level_list = [ 'D', 'C', 'B', 'A', 'S', 'SS' ] tamheom_duration_list = [ [ '20m', '40m', '1h', '1h20m' ], [ '1h', '2h', '3h', 
'4h' ], [ '3h', '6h', '12h', '24h' ] ] def __init__(self, game_name, game_data_name, window): lybgame.LYBGame.__init__(self, lybconstant.LYB_GAME_ICARUS, lybconstant.LYB_GAME_DATA_ICARUS, window) def process(self, window_image): rc = super(LYBIcarus, self).process(window_image) if rc < 0: return rc return rc def custom_check(self, window_image, window_pixel): # SKIP (loc_x, loc_y), match_rate = self.locationResourceOnWindowPart( self.window_image, 'skip_loc', custom_threshold=0.7, custom_flag=1, custom_rect=(300, 30, 340, 360) ) if loc_x != -1: self.logger.warn('SKIP: ' + str((loc_x, loc_y)) + ' ' + str(match_rate)) self.window.mouse_click(self.hwnd, loc_x, loc_y) return 'skip' # NPC SKIP (loc_x, loc_y), match_rate = self.locationResourceOnWindowPart( self.window_image, 'npc_skip_loc', custom_threshold=0.7, custom_flag=1, custom_rect=(590, 300, 635, 340) ) if loc_x != -1: self.logger.warn('NPC SKIP: ' + str((loc_x, loc_y)) + ' ' + str(match_rate)) self.window.mouse_click(self.hwnd, loc_x, loc_y) return 'skip' # BOSS SKIP (loc_x, loc_y), match_rate = self.locationResourceOnWindowPart( self.window_image, 'npc_skip_loc', custom_threshold=0.7, custom_flag=1, custom_rect=(570, 30, 635, 80) ) if loc_x != -1: self.logger.warn('BOSS SKIP: ' + str((loc_x, loc_y)) + ' ' + str(match_rate)) self.window.mouse_click(self.hwnd, loc_x, loc_y) return 'skip' # 추석 팝업 pb_name = 'popup_0920' (loc_x, loc_y), match_rate = self.locationOnWindowPart( window_image, self.resource_manager.pixel_box_dic[pb_name], custom_threshold=0.8, custom_flag=1, custom_top_level=(70, 180, 240), custom_below_level=(60, 160, 210), custom_rect=(570, 20, 610, 70) ) if loc_x != -1: self.logger.warn(pb_name + ' ' + str((loc_x, loc_y)) + ' ' + str(match_rate)) self.window.mouse_click(self.hwnd, loc_x, loc_y) return 'popup' # 팝업 pb_name_list = [ 'useless_icon_0', 'useless_icon_1', ] for pb_name in pb_name_list: if pb_name_list.index(pb_name) == 0: custom_rect = (0, 40, 30, 380) else: custom_rect = (600, 40, 640, 
380) (loc_x, loc_y), match_rate = self.locationOnWindowPart( window_image, self.resource_manager.pixel_box_dic[pb_name], custom_threshold=0.9, custom_flag=1, custom_top_level=(200, 200, 200), custom_below_level=(100, 100, 100), custom_rect=custom_rect ) if loc_x != -1: self.logger.warn(pb_name + ' ' + str((loc_x, loc_y)) + ' ' + str(match_rate)) self.window.mouse_click(self.hwnd, loc_x, loc_y) return pb_name pb_name = 'useless_trash' (loc_x, loc_y), match_rate = self.locationOnWindowPart( window_image, self.resource_manager.pixel_box_dic[pb_name], custom_threshold=0.9, custom_flag=1, custom_rect=(10, 180, 640, 240) ) if loc_x != -1: self.logger.warn(pb_name + ' ' + str((loc_x, loc_y)) + ' ' + str(match_rate)) self.window.mouse_click(self.hwnd, loc_x, loc_y) return pb_name # 정예 퀘스트 반복 수락 resource_name = 'main_scene_quest_repeat_loc' match_rate = self.rateMatchedResource(self.window_pixels, resource_name) #self.logger.info(resource_name + ' ' + str(round(match_rate, 2))) if match_rate > 0.9: self.mouse_click('main_scene_quest_repeat_accept') return resource_name # 아이템 획득 장착 pb_name = 'main_scene_item_get' match_rate = self.rateMatchedPixelBox(self.window_pixels, pb_name) if match_rate > 0.9: pb_name = 'main_scene_equip' match_rate = self.rateMatchedPixelBox(self.window_pixels, pb_name) if match_rate > 0.9: self.mouse_click(pb_name) return 'item' # 펠로우 획득 장착 pb_name = 'main_scene_fellow_get' match_rate = self.rateMatchedPixelBox(self.window_pixels, pb_name) if match_rate > 0.9: pb_name = 'main_scene_fellow_equip' match_rate = self.rateMatchedPixelBox(self.window_pixels, pb_name) if match_rate > 0.9: self.mouse_click(pb_name) return 'fellow' # 퀘스트 완료 OK pb_name = 'main_scene_quest_ok' match_rate = self.rateMatchedPixelBox(self.window_pixels, pb_name) if match_rate > 0.9: # self.mouse_click(pb_name) return 'fellow' return '' def get_screen_by_location(self, window_image): scene_name = self.scene_init_screen(window_image) if len(scene_name) > 0: return scene_name if 
self.get_option('search_google_account') == True: scene_name = self.scene_google_play_account_select(window_image) if len(scene_name) > 0: return scene_name # scene_name = self.jeontoo_scene(window_image) # if len(scene_name) > 0: # return scene_name return '' # def jeontoo_scene(self, window_image): # (loc_x, loc_y), match_rate = self.locationResourceOnWindowPart( # self.window_image, # 'jeontoo_scene_loc', # custom_below_level=(100, 100, 100), # custom_top_level=(255, 255, 255), # custom_threshold=0.7, # custom_flag=1, # custom_rect=(5, 90, 80, 130) # ) # if match_rate > 0.7: # return 'jeontoo_scene' # return '' def scene_init_screen(self, window_image): loc_x = -1 loc_y = -1 if self.player_type == 'nox': for each_icon in LYBIcarus.icarus_icon_list: (loc_x, loc_y), match_rate = self.locationOnWindowPart( window_image, self.resource_manager.pixel_box_dic[each_icon], custom_threshold=0.8, custom_flag=1, custom_rect=(80, 110, 570, 300) ) # print('[DEBUG] nox yh icon:', (loc_x, loc_y), match_rate) if loc_x != -1: break elif self.player_type == 'momo': for each_icon in LYBIcarus.icarus_icon_list: (loc_x, loc_y), match_rate = self.locationOnWindowPart( window_image, self.resource_manager.pixel_box_dic[each_icon], custom_threshold=0.8, custom_flag=1, custom_rect=(30, 10, 610, 300) ) # print('[DEBUG] momo yh icon:', (loc_x, loc_y), match_rate) if loc_x != -1: break if loc_x == -1: return '' return 'init_screen_scene' def scene_google_play_account_select(self, window_image): loc_x_list = [] loc_y_list = [] (loc_x, loc_y), match_rate = self.locationOnWindowPart( window_image, self.resource_manager.pixel_box_dic['google_play_letter'], custom_flag=1, custom_rect=(150, 50, 370, 250) ) loc_x_list.append(loc_x) loc_y_list.append(loc_y) for i in range(6): (loc_x, loc_y), match_rate = self.locationOnWindowPart( window_image, self.resource_manager.pixel_box_dic['google_play_letter_' + str(i)], custom_flag=1, custom_rect=(150, 50, 370, 250) ) loc_x_list.append(loc_x) 
loc_y_list.append(loc_y) for each_loc in loc_x_list: if each_loc == -1: return '' else: continue return 'google_play_account_select_scene' def clear_scene(self): last_scene = self.scene_dic self.scene_dic = {} for scene_name, scene in last_scene.items(): if ( 'google_play_account_select_scene' in scene_name or 'logo_screen_scene' in scene_name or 'connect_account_scene' in scene_name ): self.scene_dic[scene_name] = last_scene[scene_name] def add_scene(self, scene_name): self.scene_dic[scene_name] = lybscene.LYBIcarusScene(scene_name) self.scene_dic[scene_name].setLoggingQueue(self.logging_queue) self.scene_dic[scene_name].setGameObject(self) class LYBIcarusTab(lybgame.LYBGameTab): def __init__(self, root_frame, configure, game_options, inner_frame_dics, width, height, game_name=lybconstant.LYB_GAME_ICARUS): lybgame.LYBGameTab.__init__(self, root_frame, configure, game_options, inner_frame_dics, width, height, game_name) def set_work_list(self): lybgame.LYBGameTab.set_work_list(self) for each_work in LYBIcarus.work_list: self.option_dic['work_list_listbox'].insert('end', each_work) self.configure.common_config[self.game_name]['work_list'].append(each_work) def set_option(self): # PADDING frame = ttk.Frame( master = self.master, relief = self.frame_relief ) frame.pack(pady=5) self.inner_frame_dic['options'] = ttk.Frame( master = self.master, relief = self.frame_relief ) self.option_dic['option_note'] = ttk.Notebook( master = self.inner_frame_dic['options'] ) self.inner_frame_dic['common_tab_frame'] = ttk.Frame( master = self.option_dic['option_note'], relief = self.frame_relief ) self.inner_frame_dic['common_tab_frame'].pack(anchor=tkinter.NW, fill=tkinter.BOTH, expand=True) self.option_dic['option_note'].add(self.inner_frame_dic['common_tab_frame'], text='일반') self.inner_frame_dic['work_tab_frame'] = ttk.Frame( master = self.option_dic['option_note'], relief = self.frame_relief ) self.inner_frame_dic['work_tab_frame'].pack(anchor=tkinter.NW, fill=tkinter.BOTH, 
expand=True) self.option_dic['option_note'].add(self.inner_frame_dic['work_tab_frame'], text='작업') self.inner_frame_dic['work2_tab_frame'] = ttk.Frame( master = self.option_dic['option_note'], relief = self.frame_relief ) self.inner_frame_dic['work2_tab_frame'].pack(anchor=tkinter.NW, fill=tkinter.BOTH, expand=True) self.option_dic['option_note'].add(self.inner_frame_dic['work2_tab_frame'], text='작업2') self.inner_frame_dic['notify_tab_frame'] = ttk.Frame( master = self.option_dic['option_note'], relief = self.frame_relief ) self.inner_frame_dic['notify_tab_frame'].pack(anchor=tkinter.NW, fill=tkinter.BOTH, expand=True) self.option_dic['option_note'].add(self.inner_frame_dic['notify_tab_frame'], text='알림') # ------ # 일반 탭 좌측 frame_l = ttk.Frame(self.inner_frame_dic['common_tab_frame']) frame_label = ttk.LabelFrame(frame_l, text='스킬') frame = ttk.Frame(frame_label) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'co_skill'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'co_skill'].trace( 'w', lambda *args: self.callback_co_skill(args, lybconstant.LYB_DO_STRING_ICARUS_ETC + 'co_skill') ) if not lybconstant.LYB_DO_STRING_ICARUS_ETC + 'co_skill' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'co_skill'] = True check_box = ttk.Checkbutton( master = frame, text = self.get_option_text('연계 스킬 사용', width=14), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'co_skill'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) # frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('펠로우 스킬 쿨타임(초)') ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'fellow_skill_cooltime'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'fellow_skill_cooltime'].trace( 'w', lambda 
*args: self.callback_fellow_skill_cooltime(args, lybconstant.LYB_DO_STRING_ICARUS_ETC + 'fellow_skill_cooltime') ) combobox_list = [] for i in range(0, 301): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_ETC + 'fellow_skill_cooltime' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'fellow_skill_cooltime'] = 0 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'fellow_skill_cooltime'], state = "readonly", height = 10, width = 5, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'fellow_skill_cooltime']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) # frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('각성 스킬 쿨타임(초)') ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'gakseong_skill_cooltime'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'gakseong_skill_cooltime'].trace( 'w', lambda *args: self.callback_gakseong_skill_cooltime(args, lybconstant.LYB_DO_STRING_ICARUS_ETC + 'gakseong_skill_cooltime') ) combobox_list = [] for i in range(0, 301): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_ETC + 'gakseong_skill_cooltime' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'gakseong_skill_cooltime'] = 0 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'gakseong_skill_cooltime'], state = "readonly", height = 10, width = 5, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'gakseong_skill_cooltime']) 
combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('파티 스킬 쿨타임(초)') ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'party_skill_cooltime'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'party_skill_cooltime'].trace( 'w', lambda *args: self.callback_etc_party_skill_cooltime(args, lybconstant.LYB_DO_STRING_ICARUS_ETC + 'party_skill_cooltime') ) combobox_list = [] for i in range(0, 301): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_ETC + 'party_skill_cooltime' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'party_skill_cooltime'] = 0 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'party_skill_cooltime'], state = "readonly", height = 10, width = 5, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'party_skill_cooltime']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame_label.pack(anchor=tkinter.NW, padx=5, pady=5) frame_label = ttk.LabelFrame(frame_l, text='길들이기') frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('길들이기 쿨타임(초)(0:안함)') ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'taming'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'taming'].trace( 'w', lambda *args: self.callback_etc_taming(args, lybconstant.LYB_DO_STRING_ICARUS_ETC + 'taming') ) combobox_list = [] for i in range(0, 301): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_ETC + 'taming' in self.configure.common_config[self.game_name]: 
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'taming'] = 0 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'taming'], state = "readonly", height = 10, width = 5, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'taming']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('길들이기 반응 속도(ms)') ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'taming_response'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'taming_response'].trace( 'w', lambda *args: self.callback_etc_taming_response(args, lybconstant.LYB_DO_STRING_ICARUS_ETC + 'taming_response') ) combobox_list = [] for i in range(1, 9001, 1): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_ETC + 'taming_response' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'taming_response'] = 10 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'taming_response'], state = "readonly", height = 10, width = 5, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'taming_response']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame_label.pack(anchor=tkinter.NW, padx=5, pady=5) frame_label = ttk.LabelFrame(frame_l, text='기타') frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('문자 인식 허용률(%)') ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_threshold'] = 
tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_threshold'].trace( 'w', lambda *args: self.callback_elite_quest_threshold(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_threshold') ) combobox_list = [] for i in range(50, 101): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_threshold' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_threshold'] = 60 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_threshold'], state = "readonly", height = 10, width = 5, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_threshold']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('퀘스트 완료 탐색 주기(초)') ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quest_complete_period'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quest_complete_period'].trace( 'w', lambda *args: self.callback_etc_quest_complete_period(args, lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quest_complete_period') ) combobox_list = [] for i in range(0, 3601): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quest_complete_period' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quest_complete_period'] = 3 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quest_complete_period'], state = "readonly", height = 10, width = 5, font = lybconstant.LYB_FONT ) 
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quest_complete_period']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('자동 전환 감지 횟수(회)') ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'auto_detection_limit'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'auto_detection_limit'].trace( 'w', lambda *args: self.callback_etc_auto_detection_limit(args, lybconstant.LYB_DO_STRING_ICARUS_ETC + 'auto_detection_limit') ) combobox_list = [] for i in range(1, 61): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_ETC + 'auto_detection_limit' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'auto_detection_limit'] = 2 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'auto_detection_limit'], state = "readonly", height = 10, width = 5, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'auto_detection_limit']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'friend_react'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'friend_react'].trace( 'w', lambda *args: self.callback_friend_react(args, lybconstant.LYB_DO_STRING_ICARUS_ETC + 'friend_react') ) if not lybconstant.LYB_DO_STRING_ICARUS_ETC + 'friend_react' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'friend_react'] = True check_box = ttk.Checkbutton( master = frame, text = 
self.get_option_text('친구 상호 작용', width=14), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'friend_react'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) label = ttk.Label( master = frame, text = " " ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'accept_invite'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'accept_invite'].trace( 'w', lambda *args: self.callback_accept_invite(args, lybconstant.LYB_DO_STRING_ICARUS_ETC + 'accept_invite') ) if not lybconstant.LYB_DO_STRING_ICARUS_ETC + 'accept_invite' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'accept_invite'] = False check_box = ttk.Checkbutton( master = frame, text = self.get_option_text('파티 초대 수락', width=14), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'accept_invite'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'check_new_mail'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'check_new_mail'].trace( 'w', lambda *args: self.callback_check_new_mail(args, lybconstant.LYB_DO_STRING_ICARUS_ETC + 'check_new_mail') ) if not lybconstant.LYB_DO_STRING_ICARUS_ETC + 'check_new_mail' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'check_new_mail'] = True check_box = ttk.Checkbutton( master = frame, text = '우편함 확인', variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'check_new_mail'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'check_empty_potion'] = tkinter.BooleanVar(frame) 
self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'check_empty_potion'].trace( 'w', lambda *args: self.callback_check_empty_potion(args, lybconstant.LYB_DO_STRING_ICARUS_ETC + 'check_empty_potion') ) if not lybconstant.LYB_DO_STRING_ICARUS_ETC + 'check_empty_potion' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'check_empty_potion'] = True check_box = ttk.Checkbutton( master = frame, text = '물약 없을 때 구매하기', variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'check_empty_potion'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame_label.pack(anchor=tkinter.NW, padx=5, pady=5) frame_l.pack(side=tkinter.LEFT, anchor=tkinter.NW) # 일반 탭 중간 frame_m = ttk.Frame(self.inner_frame_dic['common_tab_frame']) frame_label = ttk.LabelFrame(frame_m, text='퀵슬롯 클릭 주기') frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('퀵슬롯 1번(초)(0:안함)') ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_0'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_0'].trace( 'w', lambda *args: self.callback_etc_quickslot_period_0(args, lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_0') ) combobox_list = [] for i in range(0, 3601): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_0' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_0'] = 0 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_0'], state = "readonly", height = 10, width = 5, font = lybconstant.LYB_FONT ) 
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_0']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('퀵슬롯 2번') ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_1'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_1'].trace( 'w', lambda *args: self.callback_etc_quickslot_period_1(args, lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_1') ) combobox_list = [] for i in range(0, 3601): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_1' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_1'] = 0 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_1'], state = "readonly", height = 10, width = 5, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_1']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('퀵슬롯 3번') ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_2'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_2'].trace( 'w', lambda *args: self.callback_etc_quickslot_period_2(args, lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_2') ) combobox_list = [] for i in range(0, 3601): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_2' in 
self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_2'] = 0 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_2'], state = "readonly", height = 10, width = 5, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_ETC + 'quickslot_period_2']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame_label.pack(anchor=tkinter.NW, padx=5, pady=5) frame_m.pack(side=tkinter.LEFT, anchor=tkinter.NW) # 일반 탭 우측 frame_r = ttk.Frame(self.inner_frame_dic['common_tab_frame']) frame_r.pack(side=tkinter.LEFT, anchor=tkinter.NW) # 작업 탭 좌측 frame_l = ttk.Frame(self.inner_frame_dic['work_tab_frame']) frame_label = ttk.LabelFrame(frame_l, text='메인 퀘스트') frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('진행 시간(초)', width=27) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_duration'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_duration'].trace( 'w', lambda *args: self.callback_main_quest_duration(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_duration') ) combobox_list = [] for i in range(60, 86401, 60): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_duration' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_duration'] = 3600 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_duration'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) 
combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_duration']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('랙 체크 주기(초)(0:안함)', width=27) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_lag_period'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_lag_period'].trace( 'w', lambda *args: self.callback_main_quest_lag_period(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_lag_period') ) combobox_list = [] for i in range(0, 3601): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_lag_period' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_lag_period'] = 60 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_lag_period'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_lag_period']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_sub'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_sub'].trace( 'w', lambda *args: self.callback_main_quest_sub(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_sub') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_sub' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_sub'] = True check_box = 
ttk.Checkbutton( master = frame, text = self.get_option_text('서브 퀘스트 우선 수행'), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_sub'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_local'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_local'].trace( 'w', lambda *args: self.callback_main_quest_local(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_local') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_local' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_local'] = True check_box = ttk.Checkbutton( master = frame, text = self.get_option_text('지역 퀘스트 우선 수행'), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_local'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('서브 퀘스트 탐색 페이지 수', width=27) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_search_sub'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_search_sub'].trace( 'w', lambda *args: self.callback_main_quest_search_sub(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_search_sub') ) combobox_list = [] for i in range(0, 11): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_search_sub' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_search_sub'] = 0 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable 
= self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_search_sub'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'main_quest_search_sub']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame_label.pack(anchor=tkinter.NW, padx=5, pady=5) frame_label = ttk.LabelFrame(frame_l, text='자동 사냥') frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('진행 시간(초)', width=27) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_duration'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_duration'].trace( 'w', lambda *args: self.callback_auto_duration(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_duration') ) combobox_list = [] for i in range(60, 86401, 60): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_duration' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_duration'] = 3600 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_duration'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_duration']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('퀘스트 클릭 주기(초)', width=27) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_questclick_period'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_questclick_period'].trace( 'w', lambda *args: 
self.callback_auto_questclick_period(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_questclick_period') ) combobox_list = [] for i in range(60, 86401, 60): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_questclick_period' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_questclick_period'] = 0 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_questclick_period'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_questclick_period']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_questclick_elite'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_questclick_elite'].trace( 'w', lambda *args: self.callback_auto_questclick_elite(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_questclick_elite') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_questclick_elite' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_questclick_elite'] = False check_box = ttk.Checkbutton( master = frame, text = self.get_option_text('정예'), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'auto_questclick_elite'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame_label.pack(anchor=tkinter.NW, padx=5, pady=5) frame_label = ttk.LabelFrame(frame_l, text='캐릭터 선택') frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('선택 번호', width=27) ) 
label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'character_number'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'character_number'].trace( 'w', lambda *args: self.callback_character_number(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'character_number') ) combobox_list = [] for i in range(1, 6): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'character_number' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'character_number'] = combobox_list[0] combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'character_number'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'character_number']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame_label.pack(anchor=tkinter.NW, padx=5, pady=5) frame_label = ttk.LabelFrame(frame_l, text='펠로우 세트') frame = ttk.Frame(frame_label) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_map'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_map'].trace( 'w', lambda *args: self.callback_fellow_set_map(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_map') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_map' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_map'] = False check_box = ttk.Checkbutton( master = frame, text = self.get_option_text('맵 열어서 이동하기'), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_map'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) 
frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('세트 효과', width=19) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_name'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_name'].trace( 'w', lambda *args: self.callback_fellow_set_name(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_name') ) combobox_list = LYBIcarus.fellow_set_list if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_name' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_name'] = combobox_list[0] combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_name'], state = "readonly", height = 10, width = 15, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_name']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('진행 시간(초)', width=27) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_duration'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_duration'].trace( 'w', lambda *args: self.callback_fellow_set_duration(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_duration') ) combobox_list = [] for i in range(60, 86401, 60): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_duration' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_duration'] = 3600 combobox = ttk.Combobox( master = frame, 
values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_duration'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_duration']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('체크 주기(초)', width=27) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_period'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_period'].trace( 'w', lambda *args: self.callback_fellow_set_period(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_period') ) combobox_list = [] for i in range(10, 86401, 10): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_period' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_period'] = 120 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_period'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_set_period']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame_label.pack(anchor=tkinter.NW, padx=5, pady=5) frame_l.pack(side=tkinter.LEFT, anchor=tkinter.NW) # 작업 탭 중간 frame_m = ttk.Frame(self.inner_frame_dic['work_tab_frame']) frame_label = ttk.LabelFrame(frame_m, text='정예 퀘스트') frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('진행 시간(초)', width=27) ) label.pack(side=tkinter.LEFT) 
self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_duration'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_duration'].trace( 'w', lambda *args: self.callback_elite_quest_duration(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_duration') ) combobox_list = [] for i in range(60, 86401, 60): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_duration' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_duration'] = 600 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_duration'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_duration']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('반복 횟수(0:무한)', width=27) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_limit'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_limit'].trace( 'w', lambda *args: self.callback_elite_quest_limit(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_limit') ) combobox_list = [] for i in range(0, 1001): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_limit' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_limit'] = 0 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_limit'], state = 
"readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_limit']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('랙 체크 주기(초)(0:안함)', width=27) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_lag'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_lag'].trace( 'w', lambda *args: self.callback_elite_quest_lag(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_lag') ) combobox_list = [] for i in range(0, 3601): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_lag' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_lag'] = 60 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_lag'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_lag']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('퀘스트 체크 주기(초)', width=27) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_check'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_check'].trace( 'w', lambda *args: self.callback_elite_quest_check(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_check') ) combobox_list = [] for i in range(0, 3601): combobox_list.append(str(i)) if not 
lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_check' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_check'] = 120 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_check'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_check']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('수행 지역(대분류)', width=19) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_area'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_area'].trace( 'w', lambda *args: self.callback_elite_quest_area(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_area') ) combobox_list = LYBIcarus.area_list if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_area' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_area'] = combobox_list[0] combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_area'], state = "readonly", height = 10, width = 15, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_area']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('수행 지역(소분류)', width=19) ) label.pack(side=tkinter.LEFT) 
self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_area_sub'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_area_sub'].trace( 'w', lambda *args: self.callback_elite_quest_area_sub(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_area_sub') ) area_index = LYBIcarus.area_list.index(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_area']) combobox_list = LYBIcarus.quest_area_sub_list[area_index] if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_area_sub' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_area_sub'] = combobox_list[0] self.elite_quest_area_sub_combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_area_sub'], state = "readonly", height = 10, width = 15, font = lybconstant.LYB_FONT ) self.elite_quest_area_sub_combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_area_sub']) self.elite_quest_area_sub_combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) s = ttk.Style() s.configure('red_label.TLabel', foreground='red') frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('이동할 퀘스트', width=14), style = 'red_label.TLabel' ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_go'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_go'].trace( 'w', lambda *args: self.callback_elite_quest_go(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_go') ) combobox_list = LYBIcarus.elite_quest_dic[self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_area_sub']] if not 
lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_go' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_go'] = combobox_list[0] self.elite_quest_go_combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_go'], state = "readonly", height = 10, width = 20, font = lybconstant.LYB_FONT ) self.elite_quest_go_combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_go']) self.elite_quest_go_combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) self.elite_quest_accept_combobox = [] for i in range(5): frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('수락할 퀘스트' + str(i + 1), width=14), ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_accept' + str(i)] = tkinter.StringVar(frame) if i == 0: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_accept' + '0'].trace( 'w', lambda *args: self.callback_elite_quest_accept_0(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_accept' + '0') ) elif i == 1: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_accept' + '1'].trace( 'w', lambda *args: self.callback_elite_quest_accept_1(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_accept' + '1') ) elif i == 2: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_accept' + '2'].trace( 'w', lambda *args: self.callback_elite_quest_accept_2(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_accept' + '2') ) elif i == 3: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_accept' + '3'].trace( 'w', lambda *args: self.callback_elite_quest_accept_3(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_accept' + '3') ) elif i == 4: 
self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_accept' + '4'].trace( 'w', lambda *args: self.callback_elite_quest_accept_4(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_accept' + '4') ) combobox_list = LYBIcarus.elite_quest_dic[self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_area_sub']] if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_accept' + str(i) in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_accept' + str(i)] = combobox_list[0] combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_accept' + str(i)], state = "readonly", height = 10, width = 20, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_accept' + str(i)]) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) self.elite_quest_accept_combobox.append(combobox) frame.pack(anchor=tkinter.W) frame_label.pack(anchor=tkinter.NW, padx=5, pady=5) frame_m.pack(side=tkinter.LEFT, anchor=tkinter.NW) # 작업 탭 우측 frame_r = ttk.Frame(self.inner_frame_dic['work_tab_frame']) frame_label = ttk.LabelFrame(frame_r, text='펠로우 탐험') frame = ttk.Frame(frame_label) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_recommend'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_recommend'].trace( 'w', lambda *args: self.callback_fellow_tamheom_recommend(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_recommend') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_recommend' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_recommend'] = True check_box = 
ttk.Checkbutton( master = frame, text = self.get_option_text('추천 펠로우 사용하기'), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_recommend'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_recommend_bottom'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_recommend_bottom'].trace( 'w', lambda *args: self.callback_fellow_tamheom_recommend_bottom(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_recommend_bottom') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_recommend_bottom' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_recommend_bottom'] = False check_box = ttk.Checkbutton( master = frame, text = self.get_option_text('아랫쪽 추천 펠로우부터 등록하기'), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_recommend_bottom'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('체크 주기(초)', width=27) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_fellow_check'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_fellow_check'].trace( 'w', lambda *args: self.callback_elite_quest_fellow_check(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_fellow_check') ) combobox_list = [] for i in range(0, 3601): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_fellow_check' in self.configure.common_config[self.game_name]: 
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_fellow_check'] = 600 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_fellow_check'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_fellow_check']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('정렬', width=27) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_sort'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_sort'].trace( 'w', lambda *args: self.callback_fellow_tamheom_sort(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_sort') ) combobox_list = LYBIcarus.fellow_sort_list if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_sort' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_sort'] = combobox_list[1] combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_sort'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_sort']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) s = ttk.Style() s.configure('green_label.TLabel', foreground='green') tamheom_area_list = [ '위', '중간', '아래' ] for i in range(len(LYBIcarus.tamheom_duration_list)): frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = 
self.get_option_text(tamheom_area_list[i], width=5), style = 'green_label.TLabel' ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_duration_' + str(i)] = tkinter.StringVar(frame) if i == 0: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_duration_0'].trace( 'w', lambda *args: self.callback_fellow_tamheom_duration_0(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_duration_0') ) elif i == 1: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_duration_1'].trace( 'w', lambda *args: self.callback_fellow_tamheom_duration_1(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_duration_1') ) elif i == 2: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_duration_2'].trace( 'w', lambda *args: self.callback_fellow_tamheom_duration_2(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_duration_2') ) if i == 0: if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_duration_' + str(i) in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_duration_' + str(i)] = 0 else: if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_duration_' + str(i) in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_duration_' + str(i)] = len(LYBIcarus.tamheom_duration_list[i]) - 1 radio_list = [] for j in range(len(LYBIcarus.tamheom_duration_list[i])): radio_list.append((LYBIcarus.tamheom_duration_list[i][j], j)) for text, mode in radio_list: radioButton = ttk.Radiobutton( master = frame, text = self.get_option_text(text, width=3), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_duration_' + str(i)], value = mode ) radioButton.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) # frame = 
ttk.Frame(frame_label) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_20'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_20'].trace( 'w', lambda *args: self.callback_fellow_tamheom_20(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_20') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_20' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_20'] = True # check_box = ttk.Checkbutton( # master = frame, # text = self.get_option_text('20분짜리 탐험이 인식되면 수행하기'), # variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'fellow_tamheom_20'], # onvalue = True, # offvalue = False # ) # check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) # frame.pack(anchor=tkinter.W) frame_label.pack(anchor=tkinter.NW, padx=5, pady=5) frame_label = ttk.LabelFrame(frame_r, text='몬스터 결정') frame = ttk.Frame(frame_label) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_now'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_now'].trace( 'w', lambda *args: self.callback_monster_crystal_now(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_now') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_now' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_now'] = True check_box = ttk.Checkbutton( master = frame, text = self.get_option_text('현재 지역에서만 작업하기'), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_now'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_map'] = 
tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_map'].trace( 'w', lambda *args: self.callback_monster_crystal_map(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_map') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_map' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_map'] = False check_box = ttk.Checkbutton( master = frame, text = self.get_option_text('맵 열어서 이동하기'), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_map'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_auto_off'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_auto_off'].trace( 'w', lambda *args: self.callback_monster_crystal_auto_off(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_auto_off') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_auto_off' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_auto_off'] = False check_box = ttk.Checkbutton( master = frame, text = self.get_option_text('체크하기 전에 항상 자동 끄기'), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_auto_off'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('수행 지역(대분류)', width=19) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_area'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 
'monster_crystal_area'].trace( 'w', lambda *args: self.callback_monster_crystal_area(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_area') ) combobox_list = LYBIcarus.area_list if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_area' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_area'] = combobox_list[0] combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_area'], state = "readonly", height = 10, width = 15, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_area']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('수행 지역(소분류)', width=19) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_sub_area'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_sub_area'].trace( 'w', lambda *args: self.callback_monster_crystal_sub_area(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_sub_area') ) combobox_list = LYBIcarus.area_sub_list if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_sub_area' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_sub_area'] = combobox_list[0][0] self.monster_area_sub_combobox = ttk.Combobox( master = frame, values = combobox_list[0], textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_sub_area'], state = "readonly", height = 10, width = 15, font = lybconstant.LYB_FONT ) 
self.monster_area_sub_combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_sub_area']) self.monster_area_sub_combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('진행 시간(초)', width=27) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_duration'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_duration'].trace( 'w', lambda *args: self.callback_monster_crystal_duration(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_duration') ) combobox_list = [] for i in range(60, 86401, 60): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_duration' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_duration'] = 3600 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_duration'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_duration']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('체크 주기(초)', width=27) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_period'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_period'].trace( 'w', lambda *args: self.callback_monster_crystal_period(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_period') ) combobox_list = [] 
for i in range(10, 86401, 10): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_period' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_period'] = 120 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_period'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_period']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame_label.pack(anchor=tkinter.NW, padx=5, pady=5) frame_r.pack(side=tkinter.LEFT, anchor=tkinter.NW) # 작업 탭 좌측 frame_l = ttk.Frame(self.inner_frame_dic['work2_tab_frame']) frame_label = ttk.LabelFrame(frame_l, text='가방 정리') frame_label_inner = ttk.LabelFrame(frame_label, text='장비') frame = ttk.Frame(frame_label_inner) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi'].trace( 'w', lambda *args: self.callback_work_gabang_jangbi(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi'] = True check_box = ttk.Checkbutton( master = frame, text = self.get_option_text('장비 정리하기'), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label_inner) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_auto_equip'] = 
tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_auto_equip'].trace( 'w', lambda *args: self.callback_work_gabang_jangbi_auto_equip(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_auto_equip') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_auto_equip' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_auto_equip'] = False check_box = ttk.Checkbutton( master = frame, text = self.get_option_text('자동 장착하기'), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_auto_equip'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label_inner) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_sell_config'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_sell_config'].trace( 'w', lambda *args: self.callback_work_gabang_jangbi_sell_config(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_sell_config') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_sell_config' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_sell_config'] = False check_box = ttk.Checkbutton( master = frame, text = self.get_option_text('판매 설정 사용하기'), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_sell_config'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label_inner) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_bunhe_config'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_bunhe_config'].trace( 'w', lambda *args: 
self.callback_work_gabang_jangbi_bunhe_config(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_bunhe_config') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_bunhe_config' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_bunhe_config'] = False check_box = ttk.Checkbutton( master = frame, text = self.get_option_text('분해 설정 사용하기'), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_bunhe_config'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label_inner) label = ttk.Label( master = frame, text = self.get_option_text('일괄 판매 수행 횟수', width=22) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_sell_count'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_sell_count'].trace( 'w', lambda *args: self.callback_work_gabang_jangbi_sell_count(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_sell_count') ) combobox_list = [] for i in range(0, 101): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_sell_count' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_sell_count'] = 10 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_sell_count'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_sell_count']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label_inner) label = ttk.Label( master = 
frame, text = self.get_option_text('일괄 분해 수행 횟수', width=22) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_bunhe_count'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_bunhe_count'].trace( 'w', lambda *args: self.callback_work_gabang_jangbi_bunhe_count(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_bunhe_count') ) combobox_list = [] for i in range(0, 101): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_bunhe_count' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_bunhe_count'] = 10 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_bunhe_count'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_jangbi_bunhe_count']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame_label_inner.pack(anchor=tkinter.NW, padx=5, pady=5) frame_label_inner = ttk.LabelFrame(frame_label, text='펠로우') frame = ttk.Frame(frame_label_inner) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow'].trace( 'w', lambda *args: self.callback_work_gabang_fellow(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow'] = True check_box = ttk.Checkbutton( master = frame, text = self.get_option_text('펠로우 정리하기'), variable = 
self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label_inner) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_auto_equip'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_auto_equip'].trace( 'w', lambda *args: self.callback_work_gabang_fellow_auto_equip(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_auto_equip') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_auto_equip' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_auto_equip'] = False check_box = ttk.Checkbutton( master = frame, text = self.get_option_text('자동 장착하기'), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_auto_equip'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label_inner) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_sell_config'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_sell_config'].trace( 'w', lambda *args: self.callback_work_gabang_fellow_sell_config(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_sell_config') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_sell_config' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_sell_config'] = False check_box = ttk.Checkbutton( master = frame, text = self.get_option_text('판매 설정 사용하기'), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_sell_config'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, 
side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label_inner) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_bunhe_config'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_bunhe_config'].trace( 'w', lambda *args: self.callback_work_gabang_fellow_bunhe_config(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_bunhe_config') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_bunhe_config' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_bunhe_config'] = False check_box = ttk.Checkbutton( master = frame, text = self.get_option_text('추출 설정 사용하기'), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_bunhe_config'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label_inner) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_yougu'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_yougu'].trace( 'w', lambda *args: self.callback_work_gabang_fellow_yougu(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_yougu') ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_yougu' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_yougu'] = True check_box = ttk.Checkbutton( master = frame, text = self.get_option_text('유그라드실의 열매 제외하기'), variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_yougu'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label_inner) label = ttk.Label( master = frame, text = self.get_option_text('일괄 판매 수행 횟수', 
width=22) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_sell_count'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_sell_count'].trace( 'w', lambda *args: self.callback_work_gabang_fellow_sell_count(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_sell_count') ) combobox_list = [] for i in range(0, 101): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_sell_count' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_sell_count'] = 10 combobox = ttk.Combobox( master = frame, values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_sell_count'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_sell_count']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label_inner) label = ttk.Label( master = frame, text = self.get_option_text('일괄 추출 수행 횟수', width=22) ) label.pack(side=tkinter.LEFT) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_bunhe_count'] = tkinter.StringVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_bunhe_count'].trace( 'w', lambda *args: self.callback_work_gabang_fellow_bunhe_count(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_bunhe_count') ) combobox_list = [] for i in range(0, 101): combobox_list.append(str(i)) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_bunhe_count' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_bunhe_count'] = 10 combobox = ttk.Combobox( master = frame, 
values = combobox_list, textvariable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_bunhe_count'], state = "readonly", height = 10, width = 7, font = lybconstant.LYB_FONT ) combobox.set(self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'gabang_fellow_bunhe_count']) combobox.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame_label_inner.pack(anchor=tkinter.NW, padx=5, pady=5) frame_label.pack(anchor=tkinter.NW, padx=5, pady=5) frame_l.pack(side=tkinter.LEFT, anchor=tkinter.NW) # 작업 탭 중간 frame_m = ttk.Frame(self.inner_frame_dic['work2_tab_frame']) frame_label = ttk.LabelFrame(frame_m, text='장비 판매 설정') frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('종류', width=8) ) label.pack(side=tkinter.LEFT) for i in range(3): self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_catalog' + str(i) ] = tkinter.BooleanVar(frame) if i == 0: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_catalog' + str(0)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_config_catalog_0(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_catalog' + str(0)) ) elif i == 1: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_catalog' + str(1)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_config_catalog_1(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_catalog' + str(1)) ) elif i == 2: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_catalog' + str(2)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_config_catalog_2(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_catalog' + str(2)) ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_catalog' + str(i) in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK 
+ 'jangbi_sell_config_catalog' + str(i)] = False check_box = ttk.Checkbutton( master = frame, text = LYBIcarus.item_catalog_list[i], variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_catalog' + str(i)], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('희귀도', width=8) ) label.pack(side=tkinter.LEFT) for i in range(4): self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(i) ] = tkinter.BooleanVar(frame) if i == 0: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(0)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_config_rank_0(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(0)) ) elif i == 1: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(1)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_config_rank_1(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(1)) ) elif i == 2: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(2)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_config_rank_2(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(2)) ) elif i == 3: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(3)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_config_rank_3(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(3)) ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(i) in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(i)] = False check_box = ttk.Checkbutton( master = frame, text = 
LYBIcarus.item_rank_list[i], variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(i)], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text(' ', width=8) ) label.pack(side=tkinter.LEFT) for i in range(4, 7): self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(i) ] = tkinter.BooleanVar(frame) if i == 4: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(4)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_config_rank_4(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(4)) ) elif i == 5: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(5)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_config_rank_5(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(5)) ) elif i == 6: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(6)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_config_rank_6(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(6)) ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(i) in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(i)] = False check_box = ttk.Checkbutton( master = frame, text = LYBIcarus.item_rank_list[i], variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_rank' + str(i)], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('각성', width=8) ) 
label.pack(side=tkinter.LEFT) for i in range(4): self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_gakseong' + str(i) ] = tkinter.BooleanVar(frame) if i == 0: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_gakseong' + str(0)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_config_gakseong_0(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_gakseong' + str(0)) ) elif i == 1: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_gakseong' + str(1)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_config_gakseong_1(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_gakseong' + str(1)) ) elif i == 2: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_gakseong' + str(2)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_config_gakseong_2(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_gakseong' + str(2)) ) elif i == 3: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_gakseong' + str(3)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_config_gakseong_3(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_gakseong' + str(3)) ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_gakseong' + str(i) in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_gakseong' + str(i)] = False check_box = ttk.Checkbutton( master = frame, text = LYBIcarus.item_gakseong_list[i], variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_config_gakseong' + str(i)], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame_label.pack(anchor=tkinter.NW, padx=5, pady=5) frame_label = ttk.LabelFrame(frame_m, text='장비 분해 설정') frame = ttk.Frame(frame_label) label = ttk.Label( 
master = frame, text = self.get_option_text('종류', width=8) ) label.pack(side=tkinter.LEFT) for i in range(3): self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_catalog' + str(i) ] = tkinter.BooleanVar(frame) if i == 0: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_catalog' + str(0)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_config_catalog_0(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_catalog' + str(0)) ) elif i == 1: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_catalog' + str(1)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_config_catalog_1(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_catalog' + str(1)) ) elif i == 2: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_catalog' + str(2)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_config_catalog_2(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_catalog' + str(2)) ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_catalog' + str(i) in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_catalog' + str(i)] = False check_box = ttk.Checkbutton( master = frame, text = LYBIcarus.item_catalog_list[i], variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_catalog' + str(i)], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('희귀도', width=8) ) label.pack(side=tkinter.LEFT) for i in range(4): self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(i) ] = tkinter.BooleanVar(frame) if i == 0: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 
'jangbi_bunhe_config_rank' + str(0)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_config_rank_0(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(0)) ) elif i == 1: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(1)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_config_rank_1(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(1)) ) elif i == 2: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(2)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_config_rank_2(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(2)) ) elif i == 3: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(3)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_config_rank_3(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(3)) ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(i) in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(i)] = False check_box = ttk.Checkbutton( master = frame, text = LYBIcarus.item_rank_list[i], variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(i)], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text(' ', width=8) ) label.pack(side=tkinter.LEFT) for i in range(4, 7): self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(i) ] = tkinter.BooleanVar(frame) if i == 4: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(4)].trace( 'w', lambda *args: 
self.callback_work_jangbi_bunhe_config_rank_4(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(4)) ) elif i == 5: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(5)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_config_rank_5(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(5)) ) elif i == 6: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(6)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_config_rank_6(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(6)) ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(i) in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(i)] = False check_box = ttk.Checkbutton( master = frame, text = LYBIcarus.item_rank_list[i], variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_rank' + str(i)], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('각성', width=8) ) label.pack(side=tkinter.LEFT) for i in range(4): self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_gakseong' + str(i) ] = tkinter.BooleanVar(frame) if i == 0: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_gakseong' + str(0)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_config_gakseong_0(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_gakseong' + str(0)) ) elif i == 1: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_gakseong' + str(1)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_config_gakseong_1(args, 
lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_gakseong' + str(1)) ) elif i == 2: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_gakseong' + str(2)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_config_gakseong_2(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_gakseong' + str(2)) ) elif i == 3: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_gakseong' + str(3)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_config_gakseong_3(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_gakseong' + str(3)) ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_gakseong' + str(i) in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_gakseong' + str(i)] = False check_box = ttk.Checkbutton( master = frame, text = LYBIcarus.item_gakseong_list[i], variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_config_gakseong' + str(i)], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame_label.pack(anchor=tkinter.NW, padx=5, pady=5) frame_m.pack(side=tkinter.LEFT, anchor=tkinter.NW) # 작업 탭 우측 frame_r = ttk.Frame(self.inner_frame_dic['work2_tab_frame']) frame_label = ttk.LabelFrame(frame_r, text='펠로우 판매 설정') frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('종류', width=8) ) label.pack(side=tkinter.LEFT) for i in range(3): self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_catalog' + str(i) ] = tkinter.BooleanVar(frame) if i == 0: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_catalog' + str(0)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_fellow_config_catalog_0(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 
'jangbi_sell_fellow_config_catalog' + str(0)) ) elif i == 1: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_catalog' + str(1)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_fellow_config_catalog_1(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_catalog' + str(1)) ) elif i == 2: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_catalog' + str(2)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_fellow_config_catalog_2(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_catalog' + str(2)) ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_catalog' + str(i) in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_catalog' + str(i)] = False check_box = ttk.Checkbutton( master = frame, text = LYBIcarus.fellow_catalog_list[i], variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_catalog' + str(i)], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('희귀도', width=8) ) label.pack(side=tkinter.LEFT) for i in range(4): self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(i) ] = tkinter.BooleanVar(frame) if i == 0: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(0)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_fellow_config_rank_0(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(0)) ) elif i == 1: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(1)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_fellow_config_rank_1(args, 
lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(1)) ) elif i == 2: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(2)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_fellow_config_rank_2(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(2)) ) elif i == 3: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(3)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_fellow_config_rank_3(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(3)) ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(i) in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(i)] = False check_box = ttk.Checkbutton( master = frame, text = LYBIcarus.item_rank_list[i], variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(i)], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text(' ', width=8) ) label.pack(side=tkinter.LEFT) for i in range(4, 7): self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(i) ] = tkinter.BooleanVar(frame) if i == 4: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(4)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_fellow_config_rank_4(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(4)) ) elif i == 5: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(5)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_fellow_config_rank_5(args, 
lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(5)) ) elif i == 6: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(6)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_fellow_config_rank_6(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(6)) ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(i) in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(i)] = False check_box = ttk.Checkbutton( master = frame, text = LYBIcarus.item_rank_list[i], variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_rank' + str(i)], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('등급', width=8) ) label.pack(side=tkinter.LEFT) for i in range(4): self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(i) ] = tkinter.BooleanVar(frame) if i == 0: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(0)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_fellow_config_level_0(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(0)) ) elif i == 1: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(1)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_fellow_config_level_1(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(1)) ) elif i == 2: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(2)].trace( 'w', lambda *args: 
self.callback_work_jangbi_sell_fellow_config_level_2(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(2)) ) elif i == 3: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(3)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_fellow_config_level_3(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(3)) ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(i) in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(i)] = False check_box = ttk.Checkbutton( master = frame, text = LYBIcarus.fellow_level_list[i], variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(i)], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text(' ', width=8) ) label.pack(side=tkinter.LEFT) for i in range(4, 6): self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(i) ] = tkinter.BooleanVar(frame) if i == 4: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(4)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_fellow_config_level_4(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(4)) ) elif i == 5: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(5)].trace( 'w', lambda *args: self.callback_work_jangbi_sell_fellow_config_level_5(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(5)) ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(i) in 
self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(i)] = False check_box = ttk.Checkbutton( master = frame, text = LYBIcarus.fellow_level_list[i], variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_sell_fellow_config_level' + str(i)], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame_label.pack(anchor=tkinter.NW, padx=5, pady=5) frame_label = ttk.LabelFrame(frame_r, text='펠로우 추출 설정') frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('희귀도', width=8) ) label.pack(side=tkinter.LEFT) for i in range(4): self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(i) ] = tkinter.BooleanVar(frame) if i == 0: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(0)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_fellow_config_rank_0(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(0)) ) elif i == 1: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(1)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_fellow_config_rank_1(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(1)) ) elif i == 2: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(2)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_fellow_config_rank_2(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(2)) ) elif i == 3: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(3)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_fellow_config_rank_3(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 
'jangbi_bunhe_fellow_config_rank' + str(3)) ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(i) in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(i)] = False check_box = ttk.Checkbutton( master = frame, text = LYBIcarus.item_rank_list[i], variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(i)], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text(' ', width=8) ) label.pack(side=tkinter.LEFT) for i in range(4, 7): self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(i) ] = tkinter.BooleanVar(frame) if i == 4: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(4)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_fellow_config_rank_4(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(4)) ) elif i == 5: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(5)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_fellow_config_rank_5(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(5)) ) elif i == 6: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(6)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_fellow_config_rank_6(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(6)) ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(i) in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 
'jangbi_bunhe_fellow_config_rank' + str(i)] = False check_box = ttk.Checkbutton( master = frame, text = LYBIcarus.item_rank_list[i], variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_rank' + str(i)], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text('등급', width=8) ) label.pack(side=tkinter.LEFT) for i in range(4): self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(i) ] = tkinter.BooleanVar(frame) if i == 0: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(0)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_fellow_config_level_0(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(0)) ) elif i == 1: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(1)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_fellow_config_level_1(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(1)) ) elif i == 2: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(2)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_fellow_config_level_2(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(2)) ) elif i == 3: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(3)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_fellow_config_level_3(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(3)) ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(i) in self.configure.common_config[self.game_name]: 
self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(i)] = False check_box = ttk.Checkbutton( master = frame, text = LYBIcarus.fellow_level_list[i], variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(i)], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame = ttk.Frame(frame_label) label = ttk.Label( master = frame, text = self.get_option_text(' ', width=8) ) label.pack(side=tkinter.LEFT) for i in range(4, 6): self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(i) ] = tkinter.BooleanVar(frame) if i == 4: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(4)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_fellow_config_level_4(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(4)) ) elif i == 5: self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(5)].trace( 'w', lambda *args: self.callback_work_jangbi_bunhe_fellow_config_level_5(args, lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(5)) ) if not lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(i) in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(i)] = False check_box = ttk.Checkbutton( master = frame, text = LYBIcarus.fellow_level_list[i], variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_WORK + 'jangbi_bunhe_fellow_config_level' + str(i)], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.W) frame_label.pack(anchor=tkinter.NW, padx=5, pady=5) frame_r.pack(side=tkinter.LEFT, 
anchor=tkinter.NW) # 알림 탭 좌 frame_l = ttk.Frame(self.inner_frame_dic['notify_tab_frame']) frame_label = ttk.Frame(frame_l) frame = ttk.Frame(frame_label) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_NOTIFY + 'elite_bosang'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_NOTIFY + 'elite_bosang'].trace( 'w', lambda *args: self.callback_notify_elite_bosang(args, lybconstant.LYB_DO_STRING_ICARUS_NOTIFY + 'elite_bosang') ) if not lybconstant.LYB_DO_STRING_ICARUS_NOTIFY + 'elite_bosang' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_NOTIFY + 'elite_bosang'] = True check_box = ttk.Checkbutton( master = frame, text = '정예 퀘스트 보상', variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_NOTIFY + 'elite_bosang'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.NW) frame = ttk.Frame(frame_label) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_NOTIFY + 'character_death'] = tkinter.BooleanVar(frame) self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_NOTIFY + 'character_death'].trace( 'w', lambda *args: self.callback_notify_character_death(args, lybconstant.LYB_DO_STRING_ICARUS_NOTIFY + 'character_death') ) if not lybconstant.LYB_DO_STRING_ICARUS_NOTIFY + 'character_death' in self.configure.common_config[self.game_name]: self.configure.common_config[self.game_name][lybconstant.LYB_DO_STRING_ICARUS_NOTIFY + 'character_death'] = True check_box = ttk.Checkbutton( master = frame, text = '캐릭터 사망', variable = self.option_dic[lybconstant.LYB_DO_STRING_ICARUS_NOTIFY + 'character_death'], onvalue = True, offvalue = False ) check_box.pack(anchor=tkinter.W, side=tkinter.LEFT) frame.pack(anchor=tkinter.NW) frame_label.pack(anchor=tkinter.NW, padx=5, pady=5) frame_l.pack(side=tkinter.LEFT, anchor=tkinter.NW) # 알림 탭 중 frame_m = ttk.Frame(self.inner_frame_dic['notify_tab_frame']) frame_m.pack(side=tkinter.LEFT, 
                     anchor=tkinter.NW)
        # Notification tab, right column (empty placeholder frame for now).
        frame_r = ttk.Frame(self.inner_frame_dic['notify_tab_frame'])
        frame_r.pack(side=tkinter.LEFT, anchor=tkinter.NW)
        # ------
        # frame_l = ttk.Frame(self.inner_frame_dic['work2_tab_frame'])
        # frame_l.pack(side=tkinter.LEFT, anchor=tkinter.NW)
        # # Work tab, middle column
        # frame_m = ttk.Frame(self.inner_frame_dic['work2_tab_frame'])
        # frame_m.pack(side=tkinter.LEFT, anchor=tkinter.NW)
        # # Work tab, right column
        # frame_r = ttk.Frame(self.inner_frame_dic['work2_tab_frame'])
        # frame_r.pack(side=tkinter.LEFT, anchor=tkinter.NW)
        # ------
        self.option_dic['option_note'].pack(anchor=tkinter.NW, fill=tkinter.BOTH, expand=True)
        self.inner_frame_dic['options'].pack(anchor=tkinter.NW, fill=tkinter.BOTH, expand=True)
        self.set_game_option()

    # ------------------------------------------------------------------
    # tkinter Variable trace callbacks.
    #
    # Each method below is registered (by exact name) via a
    # `variable.trace('w', lambda *args: self.callback_...(args, option_name))`
    # call in the option-building code above, so the names must not change.
    # `args` is the (name, index, mode) tuple forwarded from the trace and is
    # unused in every handler.  The common behavior is a one-line delegation:
    # persist the traced variable's current value into the per-game config via
    # `self.set_game_config(option_name, ...)`.  A few handlers (the *_area /
    # *_area_sub ones) additionally repopulate dependent comboboxes.
    # ------------------------------------------------------------------

    # Fellow exploration duration slots 0-2.
    def callback_fellow_tamheom_duration_2(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_fellow_tamheom_duration_1(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_fellow_tamheom_duration_0(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Fellow-extraction ("bunhe") level filter checkboxes 0-5.
    def callback_work_jangbi_bunhe_fellow_config_level_5(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_fellow_config_level_4(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_fellow_config_level_3(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_fellow_config_level_2(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_fellow_config_level_1(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_fellow_config_level_0(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Fellow-extraction rarity ("rank") filter checkboxes 0-6.
    def callback_work_jangbi_bunhe_fellow_config_rank_6(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_fellow_config_rank_5(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_fellow_config_rank_4(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_fellow_config_rank_3(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_fellow_config_rank_2(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_fellow_config_rank_1(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_fellow_config_rank_0(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Fellow-sale level filter checkboxes 0-5.
    def callback_work_jangbi_sell_fellow_config_level_5(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_fellow_config_level_4(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_fellow_config_level_3(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_fellow_config_level_2(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_fellow_config_level_1(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_fellow_config_level_0(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Fellow-sale rarity filter checkboxes 0-6.
    def callback_work_jangbi_sell_fellow_config_rank_6(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_fellow_config_rank_5(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_fellow_config_rank_4(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_fellow_config_rank_3(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_fellow_config_rank_2(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_fellow_config_rank_1(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_fellow_config_rank_0(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Fellow-sale category ("catalog") checkboxes 0-2.
    def callback_work_jangbi_sell_fellow_config_catalog_2(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_fellow_config_catalog_1(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_fellow_config_catalog_0(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Equipment-extraction awakening ("gakseong") checkboxes 0-3.
    def callback_work_jangbi_bunhe_config_gakseong_3(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_config_gakseong_2(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_config_gakseong_1(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_config_gakseong_0(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Equipment-extraction rarity checkboxes 0-6.
    def callback_work_jangbi_bunhe_config_rank_6(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_config_rank_5(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_config_rank_4(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_config_rank_3(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_config_rank_2(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_config_rank_1(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_config_rank_0(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Equipment-extraction category checkboxes 0-2.
    def callback_work_jangbi_bunhe_config_catalog_0(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_config_catalog_1(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_bunhe_config_catalog_2(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Equipment-sale awakening checkboxes 0-3.
    def callback_work_jangbi_sell_config_gakseong_3(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_config_gakseong_2(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_config_gakseong_1(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_config_gakseong_0(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Equipment-sale rarity checkboxes 0-6.
    def callback_work_jangbi_sell_config_rank_6(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_config_rank_5(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_config_rank_4(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_config_rank_3(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_config_rank_2(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_config_rank_1(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_config_rank_0(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Equipment-sale category checkboxes 0-2.
    def callback_work_jangbi_sell_config_catalog_0(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_config_catalog_1(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_jangbi_sell_config_catalog_2(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Bag ("gabang") fellow-handling options.
    def callback_work_gabang_fellow_sell_config(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_gabang_fellow_bunhe_config(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_gabang_fellow_yougu(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_gabang_fellow_bunhe_count(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_gabang_fellow_sell_count(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_gabang_fellow_auto_equip(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_gabang_fellow(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Bag equipment-handling options.
    def callback_work_gabang_jangbi_sell_config(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_gabang_jangbi_bunhe_config(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_gabang_jangbi_bunhe_count(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_gabang_jangbi_sell_count(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_gabang_jangbi_auto_equip(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_work_gabang_jangbi(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Fellow exploration / dispatch settings.
    def callback_fellow_tamheom_20(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_fellow_set_map(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_fellow_set_name(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_fellow_set_duration(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_fellow_set_period(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_character_number(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Monster-crystal farming settings.
    def callback_monster_crystal_sub_area(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_monster_crystal_auto_off(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_monster_crystal_map(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_monster_crystal_now(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_monster_crystal_area(self, args, option_name):
        """Persist the chosen area, then repopulate the sub-area combobox.

        The sub-area choices come from ``LYBIcarus.area_sub_list`` at the
        index of the selected area in ``LYBIcarus.area_list``.  If the
        currently configured sub-area is not valid for the new area, the
        combobox falls back to the first entry of the new list.
        """
        self.set_game_config(option_name, self.option_dic[option_name].get())
        area_index = LYBIcarus.area_list.index(self.option_dic[option_name].get())
        new_list = LYBIcarus.area_sub_list[area_index]
        self.monster_area_sub_combobox['values'] = new_list
        if not self.get_game_config(lybconstant.LYB_DO_STRING_ICARUS_WORK + 'monster_crystal_sub_area') in new_list:
            self.monster_area_sub_combobox.set(new_list[0])

    def callback_monster_crystal_duration(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_monster_crystal_period(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Fellow exploration list ordering options.
    def callback_fellow_tamheom_sort(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_fellow_tamheom_recommend_bottom(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_fellow_tamheom_recommend(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Skill cooldown settings.
    def callback_etc_party_skill_cooltime(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_gakseong_skill_cooltime(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Notification-tab checkboxes (wired in the notification tab above).
    def callback_notify_character_death(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_notify_elite_bosang(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Miscellaneous automation settings.
    def callback_accept_invite(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_check_empty_potion(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_etc_quickslot_period_0(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_etc_quickslot_period_1(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_etc_quickslot_period_2(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_check_new_mail(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_etc_taming_response(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_etc_taming(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_etc_auto_detection_limit(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_friend_react(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_etc_quest_complete_period(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_auto_duration(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_auto_questclick_period(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_auto_questclick_elite(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Elite-quest settings.
    def callback_elite_quest_fellow_check(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_elite_quest_check(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_elite_quest_lag(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_elite_quest_accept_0(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_elite_quest_accept_1(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_elite_quest_accept_2(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_elite_quest_accept_3(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_elite_quest_accept_4(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_elite_quest_go(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_elite_quest_area(self, args, option_name):
        """Persist the chosen elite-quest area and refresh the sub-area box.

        Sub-area choices are looked up in ``LYBIcarus.elite_quest_sub_dic``
        keyed by the selected area; if the configured sub-area is not in the
        new list, the combobox falls back to the first entry.
        """
        self.set_game_config(option_name, self.option_dic[option_name].get())
        new_list = LYBIcarus.elite_quest_sub_dic[self.option_dic[option_name].get()]
        self.elite_quest_area_sub_combobox['values'] = new_list
        if not self.get_game_config(lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_area_sub') in new_list:
            self.elite_quest_area_sub_combobox.set(new_list[0])

    def callback_elite_quest_area_sub(self, args, option_name):
        """Persist the chosen sub-area and refresh the dependent quest boxes.

        Quest choices come from ``LYBIcarus.elite_quest_dic`` keyed by the
        selected sub-area.  Both the "go" combobox and the five "accept"
        comboboxes are repopulated; any of them whose configured value is no
        longer valid falls back to the first entry of the new list.
        """
        self.set_game_config(option_name, self.option_dic[option_name].get())
        new_list = LYBIcarus.elite_quest_dic[self.option_dic[option_name].get()]
        self.elite_quest_go_combobox['values'] = new_list
        # self.logger.warn("DEBUG1: " + self.get_game_config(lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_go'))
        if not self.get_game_config(lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_go') in new_list:
            self.elite_quest_go_combobox.set(new_list[0])
        # if not self.elite_quest_go_combobox.get() in new_list:
        #     self.elite_quest_go_combobox.set(new_list[0])
        for i in range(5):
            # self.logger.warn("DEBUG2: " + self.get_game_config(lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_accept' + str(i)))
            self.elite_quest_accept_combobox[i]['values'] = new_list
            if not self.get_game_config(lybconstant.LYB_DO_STRING_ICARUS_WORK + 'elite_quest_accept' + str(i)) in new_list:
                self.elite_quest_accept_combobox[i].set(new_list[0])
            # if not self.elite_quest_accept_combobox[i].get() in new_list:
            #     self.elite_quest_accept_combobox[i].set( new_list[0])

    def callback_elite_quest_threshold(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_elite_quest_limit(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_fellow_skill_cooltime(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_co_skill(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_cond_skill(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    # Main-quest settings.
    def callback_main_quest_duration(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_elite_quest_duration(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_main_quest_lag_period(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_main_quest_sub(self, args, option_name):
        self.set_game_config(option_name, self.option_dic[option_name].get())

    def callback_main_quest_search_sub(self, args, option_name):
        self.set_game_config(option_name,
self.option_dic[option_name].get()) def callback_main_quest_local(self, args, option_name): self.set_game_config(option_name, self.option_dic[option_name].get())
41.206935
172
0.744347
21,898
149,746
4.712439
0.031464
0.09144
0.098146
0.134951
0.958321
0.949105
0.945345
0.935315
0.915711
0.89228
0
0.009568
0.141513
149,746
3,633
173
41.218277
0.793085
0.014384
0
0.490791
0
0
0.118025
0.064914
0
0
0
0
0
1
0.047749
false
0
0.00307
0
0.064461
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
fe2a6e3aa387f5e6c0048780663b36380bafe21f
4,799
py
Python
mayan/apps/document_states/tests/test_links.py
darrenflexxu/Mayan-EDMS
6707365bfacd137e625ddc1b990168012246fa07
[ "Apache-2.0" ]
null
null
null
mayan/apps/document_states/tests/test_links.py
darrenflexxu/Mayan-EDMS
6707365bfacd137e625ddc1b990168012246fa07
[ "Apache-2.0" ]
5
2021-03-19T22:56:45.000Z
2022-03-12T00:08:43.000Z
mayan/apps/document_states/tests/test_links.py
Sumit-Kumar-Jha/mayan
5b7ddeccf080b9e41cc1074c70e27dfe447be19f
[ "Apache-2.0" ]
1
2020-07-29T21:03:27.000Z
2020-07-29T21:03:27.000Z
from __future__ import unicode_literals from django.urls import reverse from mayan.apps.documents.tests.base import GenericDocumentViewTestCase from ..links import ( link_workflow_runtime_proxy_document_list, link_workflow_runtime_proxy_list, link_workflow_runtime_proxy_state_document_list, link_workflow_runtime_proxy_state_list, ) from ..permissions import permission_workflow_view from .mixins import WorkflowTestMixin class WorkflowRuntimeProxyLinkTestCase( WorkflowTestMixin, GenericDocumentViewTestCase ): def _resolve_test_link(self, test_object=None): self.add_test_view(test_object=test_object) context = self.get_test_view() self.resolved_test_link = self.test_link.resolve(context=context) def test_workflow_runtime_proxy_document_list_link_no_permission(self): self.test_link = link_workflow_runtime_proxy_document_list self._create_test_workflow(add_document_type=True) self._resolve_test_link(test_object=self.test_workflow_runtime_proxy) self.assertEqual(self.resolved_test_link, None) def test_workflow_runtime_proxy_document_list_link_with_access(self): self.test_link = link_workflow_runtime_proxy_document_list self._create_test_workflow(add_document_type=True) self.grant_access( obj=self.test_workflow, permission=permission_workflow_view ) self._resolve_test_link(test_object=self.test_workflow_runtime_proxy) self.assertNotEqual(self.resolved_test_link, None) self.assertEqual( self.resolved_test_link.url, reverse( viewname=self.test_link.view, kwargs={ 'pk': self.test_workflow_runtime_proxy.pk } ) ) def test_workflow_runtime_proxy_link_no_permission(self): self.test_link = link_workflow_runtime_proxy_list self._create_test_workflow(add_document_type=True) self._resolve_test_link() self.assertEqual(self.resolved_test_link, None) def test_workflow_runtime_proxy_link_with_access(self): self.test_link = link_workflow_runtime_proxy_list self._create_test_workflow(add_document_type=True) self.grant_access( obj=self.test_workflow, permission=permission_workflow_view ) 
self._resolve_test_link() self.assertNotEqual(self.resolved_test_link, None) self.assertEqual( self.resolved_test_link.url, reverse( viewname=self.test_link.view, ) ) def test_workflow_runtime_proxy_state_document_list_link_no_permission(self): self.test_link = link_workflow_runtime_proxy_state_document_list self._create_test_workflow(add_document_type=True) self._create_test_workflow_state() self._resolve_test_link(test_object=self.test_workflow_state_runtime_proxy) self.assertEqual(self.resolved_test_link, None) def test_workflow_runtime_proxy_state_document_list_link_with_access(self): self.test_link = link_workflow_runtime_proxy_state_document_list self._create_test_workflow(add_document_type=True) self._create_test_workflow_state() self.grant_access( obj=self.test_workflow, permission=permission_workflow_view ) self._resolve_test_link(test_object=self.test_workflow_state_runtime_proxy) self.assertNotEqual(self.resolved_test_link, None) self.assertEqual( self.resolved_test_link.url, reverse( viewname=self.test_link.view, kwargs={ 'pk': self.test_workflow_state_runtime_proxy.pk } ) ) def test_workflow_runtime_proxy_state_list_link_no_permission(self): self.test_link = link_workflow_runtime_proxy_state_list self._create_test_workflow(add_document_type=True) self._resolve_test_link(test_object=self.test_workflow_runtime_proxy) self.assertEqual(self.resolved_test_link, None) def test_workflow_runtime_proxy_state_list_link_with_access(self): self.test_link = link_workflow_runtime_proxy_state_list self._create_test_workflow(add_document_type=True) self.grant_access( obj=self.test_workflow, permission=permission_workflow_view ) self._resolve_test_link(test_object=self.test_workflow_runtime_proxy) self.assertNotEqual(self.resolved_test_link, None) self.assertEqual( self.resolved_test_link.url, reverse( viewname=self.test_link.view, kwargs={ 'pk': self.test_workflow_runtime_proxy.pk } ) )
33.559441
83
0.719108
575
4,799
5.476522
0.092174
0.088917
0.165132
0.106701
0.860273
0.860273
0.829152
0.814862
0.783106
0.772944
0
0
0.219629
4,799
142
84
33.795775
0.840854
0
0
0.578431
0
0
0.00125
0
0
0
0
0
0.117647
1
0.088235
false
0
0.058824
0
0.156863
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
fe321a92bdbbf8f53fa5ccb4778ac1cbfd7bf8dc
43,210
py
Python
Visualize/plots.py
vageeshSaxena/TX-Ray
80f96012bd7ab4c789b037bbfa996fa26c160701
[ "MIT" ]
1
2020-05-24T18:28:41.000Z
2020-05-24T18:28:41.000Z
Visualize/plots.py
vageeshSaxena/TX-Ray
80f96012bd7ab4c789b037bbfa996fa26c160701
[ "MIT" ]
5
2020-09-26T00:44:54.000Z
2022-02-10T00:39:05.000Z
Visualize/plots.py
vageeshSaxena/TX-Ray
80f96012bd7ab4c789b037bbfa996fa26c160701
[ "MIT" ]
null
null
null
######################################################################################################################## """ Description : Contains methods for all the plots used in the paper TX-Ray. Python version : 3.7.3 """ ######################################################################################################################## ########################################### Importing Libraries ######################################################## import os import sys import heapq import operator import pickle from collections import Counter, OrderedDict, defaultdict import numpy as np import pandas as pd import plotly import plotly.express as px import plotly.graph_objects as go # Custom Libraries sys.path.append('../../Evaluation_metrics/') from measures import find_shared_neurons ######################################################################################################################## def mass_activation_plot(unsup_data, zero_shot_data, sup_data, data_dict): """ :param unsup_data: Unsupervised data(dtype:pandas dataframe) :param zero_shot_data: Zero shot data(dtype:pandas dataframe) :param sup_data: Supervised data(dtype:pandas dataframe) :param data_dict: dictionary containing input instructions(dtype:dict) Plots the mass activation plot and save it in data_dict["visualize"]["plot_directory"] """ if not os.path.exists(data_dict["visualize"]["plot_directory"]): os.makedirs(data_dict["visualize"]["plot_directory"]) zero_shot_neurons = list(zero_shot_data['max_activation_index'].unique()) unsup_neurons = list(unsup_data['max_activation_index'].unique()) sup_neurons = list(sup_data['max_activation_index'].unique()) zero_shot_mass_dict, unsup_mass_dict, sup_mass_dict = ({} for i in range(3)) for neuron in unsup_neurons: temp = unsup_data[unsup_data['max_activation_index']==neuron] unsup_mass_dict[neuron] = sum(temp['max_activations']) for neuron in zero_shot_neurons: temp = zero_shot_data[zero_shot_data['max_activation_index']==neuron] 
zero_shot_mass_dict[neuron] = sum(temp['max_activations']) for neuron in sup_neurons: temp = sup_data[sup_data['max_activation_index']==neuron] sup_mass_dict[neuron] = sum(temp['max_activations']) sup = [value[1] for value in sorted(sup_mass_dict.items(), key=operator.itemgetter(1), reverse=True)] unsup = [value[1] for value in sorted(unsup_mass_dict.items(), key=operator.itemgetter(1), reverse=True)] zshot = [value[1] for value in sorted(zero_shot_mass_dict.items(), key=operator.itemgetter(1), reverse=True)] fig = go.Figure() fig.add_trace(go.Bar(y=sup, name="sup", marker_color=data_dict['visualize']['viz_colors']['sup_color'])) fig.add_trace(go.Bar(y=unsup, name="unsup", marker_color=data_dict['visualize']['viz_colors']['unsup_color'])) fig.add_trace(go.Bar(y=zshot, name="zshot", marker_color=data_dict['visualize']['viz_colors']['zero_shot_color'])) fig.update_layout(barmode='relative', title_text='Mass activations for neurons', xaxis_title="Neurons", yaxis_title="Log mass Activations", yaxis_type="log", xaxis = go.XAxis(showticklabels=False), yaxis = go.YAxis(showticklabels=False) ) # fig.write_image(os.path.join(data_dict["visualize"]["plot_directory"], "mass_activation_plot.pdf")) plotly.offline.plot(fig, filename = os.path.join(data_dict["visualize"]["plot_directory"], "mass_activation_plot.pdf"), auto_open=False) fig.show() def freq_analysis_plot(sup_data, unsup_data_epoch1, unsup_data_epoch49, data_dict): """ :param unsup_data_epoch1: Unsupervised data for 1 epoch(dtype:pandas dataframe) :param unsup_data_epoch49: Unsupervised data for 1 epoch(dtype:pandas dataframe) :param sup_data: Supervised data(dtype:pandas dataframe) :param data_dict: dictionary containing input instructions(dtype:dict) Plots the frequency analysis plot and save it in data_dict["visualize"]["plot_directory"] """ sup = sup_data['POS'].to_dict() sup_freq = Counter(sup.values()) sup_freq = dict(OrderedDict(sup_freq.most_common())) unsup1 = unsup_data_epoch1['POS'].to_dict() unsup1_freq = 
Counter(unsup1.values()) unsup1_freq = dict(OrderedDict(unsup1_freq.most_common())) unsup49 = unsup_data_epoch49['POS'].to_dict() unsup49_freq = Counter(unsup49.values()) unsup49_freq = dict(OrderedDict(unsup49_freq.most_common())) unsup1_dataframe = pd.DataFrame.from_dict(unsup1_freq, orient='index', columns=['unsup-1']) unsup49_dataframe = pd.DataFrame.from_dict(unsup49_freq, orient='index', columns=['unsup-49']) sup_dataframe = pd.DataFrame.from_dict(sup_freq, orient='index', columns=['sup']) unsup1_pos = list(unsup1_dataframe.index) unsup1_pos_mass_activation = [] for pos in unsup1_pos: temp = unsup_data_epoch1[unsup_data_epoch1['POS']==pos] unsup1_pos_mass_activation.append(temp['max_activations'].sum()) sup_pos = list(sup_dataframe.index) sup_pos_mass_activation = [] for pos in sup_pos: temp = sup_data[sup_data['POS']==pos] sup_pos_mass_activation.append(temp['max_activations'].sum()) unsup49_pos = list(unsup49_dataframe.index) unsup49_pos_mass_activation = [] for pos in unsup49_pos: temp = unsup_data_epoch49[unsup_data_epoch49['POS']==pos] unsup49_pos_mass_activation.append(temp['max_activations'].sum()) unsup1_dataframe['unsup1-mass_activation'] = unsup1_pos_mass_activation unsup49_dataframe['unsup49-mass_activation'] = unsup49_pos_mass_activation sup_dataframe['sup-mass_activation'] = sup_pos_mass_activation df = unsup1_dataframe.join(sup_dataframe) df_ = df.join(unsup49_dataframe) df_.sort_values(['unsup-1'],inplace=True,ascending=False) df_['unsup corpus POS freq. %'] = df_['unsup-1'].apply(lambda x:x/df_['unsup-1'].sum()) df_['unsup epoch 1 act. mass %'] = df_['unsup1-mass_activation'].apply(lambda x:x/df_['unsup1-mass_activation'].sum()) df_['unsup epoch 49 act. mass %'] = df_['unsup49-mass_activation'].apply(lambda x:x/df_['unsup49-mass_activation'].sum()) plot_dict = df_[['unsup corpus POS freq. %','unsup epoch 1 act. mass %','unsup epoch 49 act. mass %']].to_dict() fig = go.Figure() fig.add_trace(go.Bar(x= list(plot_dict['unsup corpus POS freq. 
%'].keys()) , y= list(plot_dict['unsup corpus POS freq. %'].values()), name="unsup POS freq. %", marker_color='black')) fig.add_trace(go.Bar(x= list(plot_dict['unsup epoch 1 act. mass %'].keys()) , y= list(plot_dict['unsup epoch 1 act. mass %'].values()), name="unsup epoch 1 act. mass %", marker_color='gray')) fig.add_trace(go.Bar(x= list(plot_dict['unsup epoch 49 act. mass %'].keys()) , y= list(plot_dict['unsup epoch 49 act. mass %'].values()), name="unsup epoch 49 act. mass %", marker_color=data_dict['visualize']['viz_colors']['unsup_epoch_49'])) fig.update_layout(barmode='relative', title_text='% POS activations vs. % POS frequencies', xaxis_title="POS tags", yaxis_title="POS %", ) # fig.write_image(os.path.join(data_dict["visualize"]["plot_directory"], "mass_activation_plot.pdf")) plotly.offline.plot(fig, filename = os.path.join(data_dict["visualize"]["plot_directory"], "freq_activation_plot.pdf"), auto_open=False) fig.show() def hellinger_length_plot(hellinger_stats, filename): """ :param hellinger_stats: path to the savd file for the hellinger statistics from calculate_hellinger_distance function :param filename: file name with directory where the results are to be stored(dtype:str) Description: Plots a scatter plot between number of features activated for every neuron vs hellinger distance between the two models """ with open(hellinger_stats, 'rb') as handle: hellinger_dict = pickle.load(handle) num_token_list, distance_list = ([] for i in range(2)) for activation,(distance,num_tokens) in hellinger_dict.items(): num_token_list.append(num_tokens) distance_list.append(distance) fig = px.scatter(x= num_token_list ,y= distance_list) plot_title = str(len(hellinger_dict)) + " neurons activated" fig.update_layout(barmode='relative', title_text=plot_title, xaxis_title="Log Hellinger length", yaxis_title="Hellinger distance", xaxis_type="log", xaxis = go.XAxis(showticklabels=False), yaxis = go.YAxis(showticklabels=False) ) plotly.offline.plot(fig, filename = 
filename,auto_open=False) fig.show() def length_shift_token_plot(model1, model2, modelname1, modelname2, color1, color2, y_axis_label_model1, y_axis_label_model2, data_dict, filename): """ :param model1:data from trained model 1(dtype:dataframe) :param model2:data from trained model 2(dtype:dataframe) :param color1:color for model 1(dtype:str) :param color2:color for model 2(dtype:str) :param modelname1:model1 label(dtype:str) :param modelname2:model2 label(dtype:str) :param y_axis_label_model1:Y axis label annotation for model1(dtype:str) :param y_axis_label_model2:Y axis label annotation for model2(dtype:str) :param data_dict: dictionary containing input instructions(dtype:dict) :param filename: pickled file name and directory to store the results """ fig = go.Figure() # Collecting number of tokens in each neurons for both the model model1_tokens_dict, model2_tokens_dict = ({} for i in range(2)) for neuron in list(model1['max_activation_index'].unique()): model1_data = model1[model1['max_activation_index'] == neuron] model1_tokens_dict[neuron] = model1_data['inputs'].nunique() for neuron in list(model2['max_activation_index'].unique()): model2_data = model2[model2['max_activation_index'] == neuron] model2_tokens_dict[neuron] = model2_data['inputs'].nunique() model1_token_list, model1_y_list = ([] for i in range(2)) model2_token_list, model2_y_list = ([] for i in range(2)) # plotting scatter plot for neuron in range(data_dict['models']['pretrained_lm']['nhid']): if neuron in list(model1['max_activation_index'].unique()): model1_token_list.append(model1_tokens_dict[neuron]) model1_y_list.append(y_axis_label_model1) if neuron in list(model2['max_activation_index'].unique()): model2_token_list.append(model2_tokens_dict[neuron]) model2_y_list.append(y_axis_label_model2) fig.add_trace(go.Scatter(x=model1_token_list,y= model1_y_list, mode='markers', name=modelname1 , marker_color=color1)) fig.add_trace(go.Scatter(x= model2_token_list, y=model2_y_list, mode='markers', 
name=modelname2, marker_color=color2)) model1_neurons = list(model1['max_activation_index'].unique()) model2_neurons = list(model2['max_activation_index'].unique()) shared_neurons = find_shared_neurons(model1_neurons, model2_neurons) for neuron in shared_neurons: if model1_tokens_dict[neuron] > model2_tokens_dict[neuron]: color_ = data_dict['visualize']['viz_colors']['length_reduced'] elif model1_tokens_dict[neuron] == model2_tokens_dict[neuron]: color_ = 'black' else: color_ = data_dict['visualize']['viz_colors']['length_increased'] x_,y_ = [model1_tokens_dict[neuron],model2_tokens_dict[neuron]],[y_axis_label_model1 ,y_axis_label_model2] fig.add_trace(go.Scatter(x= x_, y=y_, mode='lines', marker_color=color_, name=" ")) title_text = "Length of " + str(len(shared_neurons)) + " alive neurons" fig.update_layout(showlegend=False, title_text=title_text, xaxis_type="log", xaxis_title="Log number of tokens activated") plotly.offline.plot(fig, filename = filename, auto_open=False) fig.show() def length_shift_pos_plot(model1, model2, modelname1, modelname2, color1, color2, y_axis_label_model1, y_axis_label_model2, data_dict, filename): """ :param model1:data from trained model 1(dtype:dataframe) :param model2:data from trained model 2(dtype:dataframe) :param color1:color for model 1(dtype:str) :param color2:color for model 2(dtype:str) :param modelname1:model1 label(dtype:str) :param modelname2:model2 label(dtype:str) :param y_axis_label_model1:Y axis label annotation for model1(dtype:str) :param y_axis_label_model2:Y axis label annotation for model2(dtype:str) :param data_dict: dictionary containing input instructions(dtype:dict) :param filename: pickled file name and directory to store the results """ fig = go.Figure() # Collecting number of pos in each neurons for both the model model1_pos_dict, model2_pos_dict = ({} for i in range(2)) for neuron in list(model1['max_activation_index'].unique()): model1_data = model1[model1['max_activation_index'] == neuron] 
model1_pos_dict[neuron] = model1_data['POS'].nunique() for neuron in list(model2['max_activation_index'].unique()): model2_data = model2[model2['max_activation_index'] == neuron] model2_pos_dict[neuron] = model2_data['POS'].nunique() model1_token_list, model1_y_list = ([] for i in range(2)) model2_token_list, model2_y_list = ([] for i in range(2)) # plotting scatter plot for neuron in range(data_dict['models']['pretrained_lm']['nhid']): if neuron in list(model1['max_activation_index'].unique()): model1_token_list.append(model1_pos_dict[neuron]) model1_y_list.append(y_axis_label_model1) if neuron in list(model2['max_activation_index'].unique()): model2_token_list.append(model2_pos_dict[neuron]) model2_y_list.append(y_axis_label_model2) fig.add_trace(go.Scatter(x=model1_token_list,y= model1_y_list, mode='markers', name=modelname1 , marker_color=color1)) fig.add_trace(go.Scatter(x= model2_token_list, y=model2_y_list, mode='markers', name=modelname2, marker_color=color2)) model1_neurons = list(model1['max_activation_index'].unique()) model2_neurons = list(model2['max_activation_index'].unique()) shared_neurons = find_shared_neurons(model1_neurons, model2_neurons) for neuron in shared_neurons: if model1_pos_dict[neuron] > model2_pos_dict[neuron]: color_ = data_dict['visualize']['viz_colors']['length_reduced'] elif model1_pos_dict[neuron] == model2_pos_dict[neuron]: color_ = 'black' else: color_ = data_dict['visualize']['viz_colors']['length_increased'] x_,y_ = [model1_pos_dict[neuron],model2_pos_dict[neuron]],[y_axis_label_model1 ,y_axis_label_model2] fig.add_trace(go.Scatter(x= x_, y=y_, mode='lines', marker_color=color_, name=" ")) title_text = "Length of " + str(len(shared_neurons)) + " alive neurons" fig.update_layout(showlegend=False, title_text=title_text, xaxis_title="number of POS activated") plotly.offline.plot(fig, filename = filename, auto_open=False) fig.show() def choose_top_pos_from_data(df): """ :param df: dataframe(dtype:pandas dataframe) :returns: a 
dict with the top three pos tags associated with a token in the entire dataset. """ counter_dict = {} unique_tokens = df['inputs'].unique() for token in unique_tokens: temp = df[df['inputs']==token] temp_pos = list(temp['POS']) temp_pos = [tag.strip() for tag in temp_pos] tags = Counter(temp_pos) most_common_tags = tags.most_common(3) most_common_tags = [tags[0] for tags in most_common_tags] counter_dict[token] = most_common_tags return counter_dict def plot_top_10_hellinger_neurons(hellinger_stats, model1_data, model2_data, color1, color2, modelname1, modelname2, data_dict, foldername, n_tokens=0, process_data_flag=False): """ :param hellinger_stats: path to the savd file for the hellinger statistics from calculate_hellinger_distance function :param model1_data:data from trained model 1(dtype:dataframe) :param model2_data:data from trained model 2(dtype:dataframe) :param color1:color for model 1(dtype:str) :param color2:color for model 2(dtype:str) :param modelname1:model1 label(dtype:str) :param modelname2:model2 label(dtype:str) :param data_dict: dictionary containing input instructions(dtype:dict) :param foldername: pickled file name and directory to store the results :param n_tokens: number of tokens you want to plot(dtype:int) :param process_data_flag: True if the pickle files need to be generated, False if you want to load the pickle files. 
:Description: Generates the plot for the top 10 neurons with highest hellinger distances in hellinger_stats """ # removing the whitespaces model1_data['POS'] = model1_data['POS'].apply(lambda x:x.replace(" ","")) model2_data['POS'] = model2_data['POS'].apply(lambda x:x.replace(" ","")) # Getting all the POS tags activated model1_pos = list(model1_data['POS'].unique()) model1_pos = list(model2_data['POS'].unique()) all_pos = set(model1_pos + model1_pos) # all_pos = [pos.strip() for pos in all_pos] # loading the Hellinger distance dictionary with open(hellinger_stats, 'rb') as handle: hellinger_dict = pickle.load(handle) top_10_neurons = heapq.nlargest(10, hellinger_dict, key=hellinger_dict.get) for neuron in top_10_neurons: path = os.path.join(data_dict["visualize"]["plot_directory"],foldername,"top_10",str(neuron)) if not os.path.exists(path): os.makedirs(path) model1_data_temp = model1_data[model1_data['max_activation_index']==neuron] model2_data_temp = model2_data[model2_data['max_activation_index']==neuron] # Getting the pos stats from all the dictionaries model1_pos_dict = dict(Counter(model1_data_temp['POS'])) model2_pos_dict = dict(Counter(model2_data_temp['POS'])) # Creating dataframe from the dictionaries model1_pos = pd.DataFrame.from_dict(model1_pos_dict, orient='index', columns=[modelname1]) model2_pos = pd.DataFrame.from_dict(model2_pos_dict, orient='index', columns=[modelname2]) # Normalizing the statistics model1_pos[modelname1] = model1_pos[modelname1].apply(lambda x: x/model1_pos[modelname1].sum()) model2_pos[modelname2] = model2_pos[modelname2].apply(lambda x: x/model2_pos[modelname2].sum()) # Merging dataframe data = [model1_pos[modelname1], model2_pos[modelname2]] df = pd.concat(data,axis=1) # Again converting the dataframe to dictionary for further computations. 
all_pos_stats = df.to_dict() # Getting all the pos stats into a dictionary for viz_data in all_pos_stats.keys(): for tags in all_pos: if tags not in all_pos_stats[viz_data].keys(): all_pos_stats[viz_data][tags] = None # Converting pos stats to a dataframe # all_pos_stats = pd.DataFrame.from_dict(all_pos_stats) if process_data_flag == True: # Getting the data. model1_neurondata = model1_data[model1_data['max_activation_index']==neuron] model1_neurondata['POS'] = model1_neurondata['POS'].apply(lambda x: x.strip()) model2_neurondata = model2_data[model2_data['max_activation_index']==neuron] model2_neurondata['POS'] = model2_neurondata['POS'].apply(lambda x: x.strip()) # Converting the other pos tags to the top three ones model1_top_pos = choose_top_pos_from_data(model1_neurondata) model2_top_pos = choose_top_pos_from_data(model2_neurondata) model1_tokens = list(model1_neurondata['inputs']) model1_pos = list(model1_neurondata['POS']) model2_tokens = list(model2_neurondata['inputs']) model2_pos = list(model2_neurondata['POS']) for index, pos in enumerate(model1_pos): if pos not in model1_top_pos[model1_tokens[index]]: model1_pos[index] = model1_top_pos[model1_tokens[index]][0] for index, pos in enumerate(model2_pos): if pos not in model2_top_pos[model2_tokens[index]]: model2_pos[index] = model2_top_pos[model2_tokens[index]][0] model1_neurondata['POS'] = model1_pos model2_neurondata['POS'] = model2_pos # Getting all the unique tokens model1_unique_tokens = model1_neurondata["inputs"].unique() model2_unique_tokens = model2_neurondata["inputs"].unique() model1_dict,model2_dict = ({} for i in range(2)) # Generating model1 visualization # Getting mean for all the unique tokens for tokens in model1_unique_tokens: temp_df = model1_neurondata[model1_neurondata["inputs"] == tokens] pos = list(temp_df["POS"].unique()) activation_temp = [] for unique_pos in pos: activation_temp.append(temp_df[temp_df['POS']==unique_pos]["max_activations"].mean()) model1_dict[tokens] = {"POS":pos, 
"activation":activation_temp} # Getting the top 20 activation tokens model1_top_20 = {} temp_activations, temp_tokens = ([] for i in range(2)) for key, value in model1_dict.items(): for index in range(len(value['POS'])): temp_tokens.append(key) temp_activations.append(value['activation'][index]) model1_top_20_activation_index = sorted(range(len(temp_activations)), key=lambda x: temp_activations[x])[-n_tokens:] for indexes in model1_top_20_activation_index: model1_top_20[temp_tokens[indexes]] = model1_dict[temp_tokens[indexes]] # Flipping the dictionary to get it in the order of {pos-tags:list(tuple(token,mean_activations))} model1_token_dict = defaultdict(list) for token,stats in model1_top_20.items(): for index,value in enumerate(stats['POS']): model1_token_dict[stats['POS'][index]].append((token,stats['activation'][index])) # Adding the null features for the tags not present for tags in all_pos: if tags not in model1_token_dict.keys(): model1_token_dict[tags].append((' ',0.0)) # Sorting dict on the basis of the names sorted_model1_dict = {} for key in sorted(model1_token_dict.keys()): sorted_model1_dict[key] = model1_token_dict[key] with open(os.path.join(path,'model1_data.pickle'), 'wb') as handle: pickle.dump(sorted_model1_dict, handle, protocol=pickle.HIGHEST_PROTOCOL) # Generating model2 visualization # Getting mean for all the unique tokens for tokens in model2_unique_tokens: temp_df = model2_neurondata[model2_neurondata["inputs"] == tokens] pos = list(temp_df["POS"].unique()) activation_temp = [] for unique_pos in pos: activation_temp.append(temp_df[temp_df['POS']==unique_pos]["max_activations"].mean()) model2_dict[tokens] = {"POS":pos, "activation":activation_temp} # Getting the top 20 activation tokens model2_top_20 = {} temp_activations, temp_tokens = ([] for i in range(2)) for key, value in model2_dict.items(): for index in range(len(value['POS'])): temp_tokens.append(key) temp_activations.append(value['activation'][index]) 
model2_top_20_activation_index = sorted(range(len(temp_activations)), key=lambda x: temp_activations[x])[-n_tokens:] for indexes in model2_top_20_activation_index: model2_top_20[temp_tokens[indexes]] = model2_dict[temp_tokens[indexes]] # Flipping the dictionary to get it in the order of {pos-tags:list(tuple(token,mean_activations))} model2_token_dict = defaultdict(list) for token,stats in model2_top_20.items(): for index,value in enumerate(stats['POS']): model2_token_dict[stats['POS'][index]].append((token,stats['activation'][index])) # Adding the null features for the tags not present for tags in all_pos: if tags not in model2_token_dict.keys(): model2_token_dict[tags].append((' ',0.0)) # Sorting dict on the basis of the names sorted_model2_dict = {} for key in sorted(model2_token_dict.keys()): sorted_model2_dict[key] = model2_token_dict[key] with open(os.path.join(path,'model2_data.pickle'), 'wb') as handle: pickle.dump(sorted_model2_dict, handle, protocol=pickle.HIGHEST_PROTOCOL) else: # loading the dictionary with open(os.path.join(path,'model1_data.pickle'), 'rb') as handle: sorted_model1_dict = pickle.load(handle) with open(os.path.join(path,'model2_data.pickle'), 'rb') as handle: sorted_model2_dict = pickle.load(handle) fig = go.Figure() # Plotting the bar plot fig.add_trace(go.Bar(x=list(all_pos_stats[modelname1].keys()), y=list(all_pos_stats[modelname1].values()), name=modelname1, marker_color=color1, opacity=0.6)) fig.add_trace(go.Bar(x=list(all_pos_stats[modelname2].keys()), y=list(all_pos_stats[modelname2].values()), name=modelname2, marker_color=color2, opacity=0.6)) # Plotting the tokens on the bar plot pos_model1 = list(sorted_model1_dict.keys()) values_model1 = list(sorted_model1_dict.values()) pos_model2 = list(sorted_model2_dict.keys()) values_model2 = list(sorted_model2_dict.values()) model1_value = [[(value[0],np.nan) if value[1]==0.0 else (value[0],value[1]) for value in pairs] for pairs in values_model1] model2_value = [[(value[0],np.nan) if 
value[1]==0.0 else (value[0],value[1]) for value in pairs] for pairs in values_model2] model1_token = [[value[0] for value in pairs] for pairs in model1_value] model1_activations = [[value[1] for value in pairs] for pairs in model1_value] model2_token = [[value[0] for value in pairs] for pairs in model2_value] model2_activations = [[value[1] for value in pairs] for pairs in model2_value] pos_model1_list, activation_model1_list, token_model1_list = ([] for i in range(3)) for index in range(len(pos_model1)): for activation_list_index, activation in enumerate(model1_activations[index]): if activation >= 0.0: pos_model1_list.append(pos_model1[index]) activation_model1_list.append(activation) token_model1_list.append(model1_token[index][activation_list_index]) fig.add_trace(go.Scatter(x=pos_model1_list, y=activation_model1_list, text=token_model1_list, mode='markers+text', marker_color=color1, name=modelname1, textfont={'color':color1})) pos_model2_list, activation_model2_list, token_model2_list = ([] for i in range(3)) for index in range(len(pos_model2)): for activation_list_index, activation in enumerate(model2_activations[index]): if activation >= 0.0: pos_model2_list.append(pos_model2[index]) activation_model2_list.append(activation) token_model2_list.append(model2_token[index][activation_list_index]) fig.add_trace(go.Scatter(x=pos_model2_list, y=activation_model2_list, text=token_model2_list, mode='markers+text', marker_color=color2, name=modelname2, textfont={'color':color2})) fig.update_layout(title_text='Hellinger plot for ' + str(neuron) + "-neuron" , xaxis_title="POS-tags", yaxis_title="Activation", xaxis = go.XAxis(showticklabels=True), yaxis = go.YAxis(showticklabels=True) ) plotly.offline.plot(fig, filename = os.path.join(path,str(neuron)+".pdf"), auto_open=False) fig.show() def plot_least_10_hellinger_neurons(hellinger_stats, model1_data, model2_data, color1, color2, modelname1, modelname2, data_dict, foldername, n_tokens=0, process_data_flag=False): """ 
:param hellinger_stats: path to the savd file for the hellinger statistics from calculate_hellinger_distance function :param model1_data:data from trained model 1(dtype:dataframe) :param model2_data:data from trained model 2(dtype:dataframe) :param color1:color for model 1(dtype:str) :param color2:color for model 2(dtype:str) :param modelname1:model1 label(dtype:str) :param modelname2:model2 label(dtype:str) :param data_dict: dictionary containing input instructions(dtype:dict) :param foldername: pickled file name and directory to store the results :param n_tokens: number of tokens you want to plot(dtype:int) :param process_data_flag: True if the pickle files need to be generated, False if you want to load the pickle files. :Description: Generates the plot for the least 10 neurons with highest hellinger distances in hellinger_stats """ # removing the whitespaces model1_data['POS'] = model1_data['POS'].apply(lambda x:x.replace(" ","")) model2_data['POS'] = model2_data['POS'].apply(lambda x:x.replace(" ","")) # Getting all the POS tags activated model1_pos = list(model1_data['POS'].unique()) model1_pos = list(model2_data['POS'].unique()) all_pos = set(model1_pos + model1_pos) # all_pos = [pos.strip() for pos in all_pos] # loading the Hellinger distance dictionary with open(hellinger_stats, 'rb') as handle: hellinger_dict = pickle.load(handle) least_10_neurons = heapq.nsmallest(10, hellinger_dict, key=hellinger_dict.get) for neuron in least_10_neurons: path = os.path.join(data_dict["visualize"]["plot_directory"],foldername,"least_10",str(neuron)) if not os.path.exists(path): os.makedirs(path) model1_data_temp = model1_data[model1_data['max_activation_index']==neuron] model2_data_temp = model2_data[model2_data['max_activation_index']==neuron] # Getting the pos stats from all the dictionaries model1_pos_dict = dict(Counter(model1_data_temp['POS'])) model2_pos_dict = dict(Counter(model2_data_temp['POS'])) # Creating dataframe from the dictionaries model1_pos = 
pd.DataFrame.from_dict(model1_pos_dict, orient='index', columns=[modelname1]) model2_pos = pd.DataFrame.from_dict(model2_pos_dict, orient='index', columns=[modelname2]) # Normalizing the statistics model1_pos[modelname1] = model1_pos[modelname1].apply(lambda x: x/model1_pos[modelname1].sum()) model2_pos[modelname2] = model2_pos[modelname2].apply(lambda x: x/model2_pos[modelname2].sum()) # Merging dataframe data = [model1_pos[modelname1], model2_pos[modelname2]] df = pd.concat(data,axis=1) # Again converting the dataframe to dictionary for further computations. all_pos_stats = df.to_dict() # Getting all the pos stats into a dictionary for viz_data in all_pos_stats.keys(): for tags in all_pos: if tags not in all_pos_stats[viz_data].keys(): all_pos_stats[viz_data][tags] = None # Converting pos stats to a dataframe # all_pos_stats = pd.DataFrame.from_dict(all_pos_stats) if process_data_flag == True: # Getting the data. model1_neurondata = model1_data[model1_data['max_activation_index']==neuron] model1_neurondata['POS'] = model1_neurondata['POS'].apply(lambda x: x.strip()) model2_neurondata = model2_data[model2_data['max_activation_index']==neuron] model2_neurondata['POS'] = model2_neurondata['POS'].apply(lambda x: x.strip()) # Converting the other pos tags to the least three ones model1_least_pos = choose_top_pos_from_data(model1_neurondata) model2_least_pos = choose_top_pos_from_data(model2_neurondata) model1_tokens = list(model1_neurondata['inputs']) model1_pos = list(model1_neurondata['POS']) model2_tokens = list(model2_neurondata['inputs']) model2_pos = list(model2_neurondata['POS']) for index, pos in enumerate(model1_pos): if pos not in model1_least_pos[model1_tokens[index]]: model1_pos[index] = model1_least_pos[model1_tokens[index]][0] for index, pos in enumerate(model2_pos): if pos not in model2_least_pos[model2_tokens[index]]: model2_pos[index] = model2_least_pos[model2_tokens[index]][0] model1_neurondata['POS'] = model1_pos model2_neurondata['POS'] = 
model2_pos # Getting all the unique tokens model1_unique_tokens = model1_neurondata["inputs"].unique() model2_unique_tokens = model2_neurondata["inputs"].unique() model1_dict,model2_dict = ({} for i in range(2)) # Generating model1 visualization # Getting mean for all the unique tokens for tokens in model1_unique_tokens: temp_df = model1_neurondata[model1_neurondata["inputs"] == tokens] pos = list(temp_df["POS"].unique()) activation_temp = [] for unique_pos in pos: activation_temp.append(temp_df[temp_df['POS']==unique_pos]["max_activations"].mean()) model1_dict[tokens] = {"POS":pos, "activation":activation_temp} # Getting the least 20 activation tokens model1_least_20 = {} temp_activations, temp_tokens = ([] for i in range(2)) for key, value in model1_dict.items(): for index in range(len(value['POS'])): temp_tokens.append(key) temp_activations.append(value['activation'][index]) model1_least_20_activation_index = sorted(range(len(temp_activations)), key=lambda x: temp_activations[x])[-n_tokens:] for indexes in model1_least_20_activation_index: model1_least_20[temp_tokens[indexes]] = model1_dict[temp_tokens[indexes]] # Flipping the dictionary to get it in the order of {pos-tags:list(tuple(token,mean_activations))} model1_token_dict = defaultdict(list) for token,stats in model1_least_20.items(): for index,value in enumerate(stats['POS']): model1_token_dict[stats['POS'][index]].append((token,stats['activation'][index])) # Adding the null features for the tags not present for tags in all_pos: if tags not in model1_token_dict.keys(): model1_token_dict[tags].append((' ',0.0)) # Sorting dict on the basis of the names sorted_model1_dict = {} for key in sorted(model1_token_dict.keys()): sorted_model1_dict[key] = model1_token_dict[key] with open(os.path.join(path,'model1_data.pickle'), 'wb') as handle: pickle.dump(sorted_model1_dict, handle, protocol=pickle.HIGHEST_PROTOCOL) # Generating model2 visualization # Getting mean for all the unique tokens for tokens in 
model2_unique_tokens: temp_df = model2_neurondata[model2_neurondata["inputs"] == tokens] pos = list(temp_df["POS"].unique()) activation_temp = [] for unique_pos in pos: activation_temp.append(temp_df[temp_df['POS']==unique_pos]["max_activations"].mean()) model2_dict[tokens] = {"POS":pos, "activation":activation_temp} # Getting the least 20 activation tokens model2_least_20 = {} temp_activations, temp_tokens = ([] for i in range(2)) for key, value in model2_dict.items(): for index in range(len(value['POS'])): temp_tokens.append(key) temp_activations.append(value['activation'][index]) model2_least_20_activation_index = sorted(range(len(temp_activations)), key=lambda x: temp_activations[x])[-n_tokens:] for indexes in model2_least_20_activation_index: model2_least_20[temp_tokens[indexes]] = model2_dict[temp_tokens[indexes]] # Flipping the dictionary to get it in the order of {pos-tags:list(tuple(token,mean_activations))} model2_token_dict = defaultdict(list) for token,stats in model2_least_20.items(): for index,value in enumerate(stats['POS']): model2_token_dict[stats['POS'][index]].append((token,stats['activation'][index])) # Adding the null features for the tags not present for tags in all_pos: if tags not in model2_token_dict.keys(): model2_token_dict[tags].append((' ',0.0)) # Sorting dict on the basis of the names sorted_model2_dict = {} for key in sorted(model2_token_dict.keys()): sorted_model2_dict[key] = model2_token_dict[key] with open(os.path.join(path,'model2_data.pickle'), 'wb') as handle: pickle.dump(sorted_model2_dict, handle, protocol=pickle.HIGHEST_PROTOCOL) else: # loading the dictionary with open(os.path.join(path,'model1_data.pickle'), 'rb') as handle: sorted_model1_dict = pickle.load(handle) with open(os.path.join(path,'model2_data.pickle'), 'rb') as handle: sorted_model2_dict = pickle.load(handle) fig = go.Figure() # Plotting the bar plot fig.add_trace(go.Bar(x=list(all_pos_stats[modelname1].keys()), y=list(all_pos_stats[modelname1].values()), 
name=modelname1, marker_color=color1, opacity=0.6)) fig.add_trace(go.Bar(x=list(all_pos_stats[modelname2].keys()), y=list(all_pos_stats[modelname2].values()), name=modelname2, marker_color=color2, opacity=0.6)) # Plotting the tokens on the bar plot pos_model1 = list(sorted_model1_dict.keys()) values_model1 = list(sorted_model1_dict.values()) pos_model2 = list(sorted_model2_dict.keys()) values_model2 = list(sorted_model2_dict.values()) model1_value = [[(value[0],np.nan) if value[1]==0.0 else (value[0],value[1]) for value in pairs] for pairs in values_model1] model2_value = [[(value[0],np.nan) if value[1]==0.0 else (value[0],value[1]) for value in pairs] for pairs in values_model2] model1_token = [[value[0] for value in pairs] for pairs in model1_value] model1_activations = [[value[1] for value in pairs] for pairs in model1_value] model2_token = [[value[0] for value in pairs] for pairs in model2_value] model2_activations = [[value[1] for value in pairs] for pairs in model2_value] pos_model1_list, activation_model1_list, token_model1_list = ([] for i in range(3)) for index in range(len(pos_model1)): for activation_list_index, activation in enumerate(model1_activations[index]): pos_model1_list.append(pos_model1[index]) activation_model1_list.append(activation) token_model1_list.append(model1_token[index][activation_list_index]) fig.add_trace(go.Scatter(x=pos_model1_list, y=activation_model1_list, text=token_model1_list, mode='markers+text', marker_color=color1, name=modelname1, textfont={'color':color1})) pos_model2_list, activation_model2_list, token_model2_list = ([] for i in range(3)) for index in range(len(pos_model2)): for activation_list_index, activation in enumerate(model2_activations[index]): pos_model2_list.append(pos_model2[index]) activation_model2_list.append(activation) token_model2_list.append(model2_token[index][activation_list_index]) fig.add_trace(go.Scatter(x=pos_model2_list, y=activation_model2_list, text=token_model2_list, mode='markers+text', 
marker_color=color2, name=modelname2, textfont={'color':color2})) fig.update_layout(title_text='Hellinger plot for ' + str(neuron) + "-neuron" , xaxis_title="POS-tags", yaxis_title="Activation", xaxis = go.XAxis(showticklabels=True), yaxis = go.YAxis(showticklabels=True) ) plotly.offline.plot(fig, filename = os.path.join(path,str(neuron)+".pdf"), auto_open=False) fig.show()
53.677019
132
0.635871
5,435
43,210
4.804232
0.05483
0.026426
0.020681
0.009957
0.883612
0.849182
0.834782
0.818774
0.797633
0.777182
0
0.025471
0.2486
43,210
805
133
53.677019
0.778736
0.164476
0
0.653257
0
0
0.085029
0.005903
0
0
0
0
0
1
0.015326
false
0
0.022989
0
0.04023
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
fe7df7af8ab7202a62953a8d0f1ff8aa85d9686c
407
py
Python
doc/examples/named.py
aaronsewall/pytest-dependency
db34c5451891629ad54a18e8a5e6a45b7ec968f8
[ "Apache-2.0" ]
91
2017-01-30T16:05:13.000Z
2022-03-29T12:17:35.000Z
doc/examples/named.py
aaronsewall/pytest-dependency
db34c5451891629ad54a18e8a5e6a45b7ec968f8
[ "Apache-2.0" ]
63
2016-04-21T19:30:32.000Z
2022-03-30T13:17:42.000Z
doc/examples/named.py
aaronsewall/pytest-dependency
db34c5451891629ad54a18e8a5e6a45b7ec968f8
[ "Apache-2.0" ]
29
2017-09-24T17:22:02.000Z
2022-03-30T20:39:49.000Z
import pytest @pytest.mark.dependency(name="a") @pytest.mark.xfail(reason="deliberate fail") def test_a(): assert False @pytest.mark.dependency(name="b") def test_b(): pass @pytest.mark.dependency(name="c", depends=["a"]) def test_c(): pass @pytest.mark.dependency(name="d", depends=["b"]) def test_d(): pass @pytest.mark.dependency(name="e", depends=["b", "c"]) def test_e(): pass
17.695652
53
0.660934
61
407
4.327869
0.327869
0.227273
0.378788
0.454545
0.318182
0
0
0
0
0
0
0
0.132678
407
22
54
18.5
0.747875
0
0
0.235294
0
0
0.058968
0
0
0
0
0
0.058824
1
0.294118
true
0.235294
0.058824
0
0.352941
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
0
0
0
0
0
6
fe94157ed2ebb63e0df4a7d49fbcfd8453c908ca
2,536
py
Python
ramjet/models/single_layer_model.py
golmschenk/ramjet
77fb4481a15088923308fda09804d80455d1a9cf
[ "Apache-2.0" ]
3
2020-11-23T18:47:37.000Z
2021-08-05T17:45:51.000Z
ramjet/models/single_layer_model.py
golmschenk/ramjet
77fb4481a15088923308fda09804d80455d1a9cf
[ "Apache-2.0" ]
5
2021-08-19T00:54:57.000Z
2022-02-10T00:15:40.000Z
ramjet/models/single_layer_model.py
golmschenk/ramjet
77fb4481a15088923308fda09804d80455d1a9cf
[ "Apache-2.0" ]
3
2019-07-12T21:00:57.000Z
2020-06-03T22:18:13.000Z
from tensorflow import sigmoid from tensorflow.keras import Model from tensorflow.keras.layers import Dense, Reshape, Flatten, Concatenate class SingleLayerModel(Model): def __init__(self): super().__init__() self.flatten = Flatten() self.dense = Dense(1, activation=sigmoid) def call(self, inputs, training=False, mask=None): """ The forward pass of the layer. :param inputs: The input tensor. :param training: A boolean specifying if the layer should be in training mode. :param mask: A mask for the input tensor. :return: The output tensor of the layer. """ x = inputs x = self.flatten(x) x = self.dense(x, training=training) outputs = x return outputs class SingleLayerModelWithAuxiliary(Model): def __init__(self, number_of_label_values: int = 1): super().__init__() self.flatten_light_curve = Flatten() self.concatenate = Concatenate() self.dense = Dense(number_of_label_values, activation=sigmoid) def call(self, inputs, training=False, mask=None): """ The forward pass of the layer. :param inputs: The input tensor. :param training: A boolean specifying if the layer should be in training mode. :param mask: A mask for the input tensor. :return: The output tensor of the laye1. """ light_curve, auxiliary_information = inputs x = self.flatten_light_curve(light_curve) x = self.concatenate([x, auxiliary_information]) x = self.dense(x, training=training) outputs = x return outputs class SingleLayerModelLinearWithAuxiliary(Model): def __init__(self, number_of_label_values: int = 1): super().__init__() self.flatten_light_curve = Flatten() self.concatenate = Concatenate() self.dense = Dense(number_of_label_values) def call(self, inputs, training=False, mask=None): """ The forward pass of the layer. :param inputs: The input tensor. :param training: A boolean specifying if the layer should be in training mode. :param mask: A mask for the input tensor. :return: The output tensor of the laye1. 
""" light_curve, auxiliary_information = inputs x = self.flatten_light_curve(light_curve) x = self.concatenate([x, auxiliary_information]) x = self.dense(x, training=training) outputs = x return outputs
34.27027
86
0.645505
308
2,536
5.146104
0.185065
0.025237
0.052997
0.04795
0.819558
0.819558
0.819558
0.819558
0.819558
0.819558
0
0.002701
0.27011
2,536
73
87
34.739726
0.853593
0.268139
0
0.675
0
0
0
0
0
0
0
0
0
1
0.15
false
0
0.075
0
0.375
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
fea9075f766fdeec6fca8be25d40e2d3675d2f15
81
py
Python
asn1PERser/classes/data/builtin/UTF8String.py
erupikus/asn1PERser
11dc2985107a9fbba00bea83c1021d2665e8f193
[ "MIT" ]
3
2021-06-14T03:29:37.000Z
2021-11-15T09:45:11.000Z
asn1PERser/classes/data/builtin/UTF8String.py
erupikus/asn1PERser
11dc2985107a9fbba00bea83c1021d2665e8f193
[ "MIT" ]
null
null
null
asn1PERser/classes/data/builtin/UTF8String.py
erupikus/asn1PERser
11dc2985107a9fbba00bea83c1021d2665e8f193
[ "MIT" ]
null
null
null
from pyasn1.type.char import UTF8String class UTF8String(UTF8String): pass
13.5
39
0.777778
10
81
6.3
0.8
0
0
0
0
0
0
0
0
0
0
0.058824
0.160494
81
5
40
16.2
0.867647
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
fea9d6630d6122d0e24cffd06c97c3797b16abc4
4,384
py
Python
google/cloud/gaming_v1beta/types/__init__.py
Yingxin-Jiang/python-game-servers
7b897b44024a8f06467ad8433bfe900feaee4b04
[ "Apache-2.0" ]
null
null
null
google/cloud/gaming_v1beta/types/__init__.py
Yingxin-Jiang/python-game-servers
7b897b44024a8f06467ad8433bfe900feaee4b04
[ "Apache-2.0" ]
null
null
null
google/cloud/gaming_v1beta/types/__init__.py
Yingxin-Jiang/python-game-servers
7b897b44024a8f06467ad8433bfe900feaee4b04
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from .common import ( OperationMetadata, OperationStatus, LabelSelector, RealmSelector, Schedule, SpecSource, TargetDetails, TargetState, DeployedFleetDetails, ) from .game_server_clusters import ( ListGameServerClustersRequest, ListGameServerClustersResponse, GetGameServerClusterRequest, CreateGameServerClusterRequest, PreviewCreateGameServerClusterRequest, PreviewCreateGameServerClusterResponse, DeleteGameServerClusterRequest, PreviewDeleteGameServerClusterRequest, PreviewDeleteGameServerClusterResponse, UpdateGameServerClusterRequest, PreviewUpdateGameServerClusterRequest, PreviewUpdateGameServerClusterResponse, GameServerClusterConnectionInfo, GkeClusterReference, GameServerCluster, ) from .game_server_configs import ( ListGameServerConfigsRequest, ListGameServerConfigsResponse, GetGameServerConfigRequest, CreateGameServerConfigRequest, DeleteGameServerConfigRequest, ScalingConfig, FleetConfig, GameServerConfig, ) from .game_server_deployments import ( ListGameServerDeploymentsRequest, ListGameServerDeploymentsResponse, GetGameServerDeploymentRequest, GetGameServerDeploymentRolloutRequest, CreateGameServerDeploymentRequest, DeleteGameServerDeploymentRequest, UpdateGameServerDeploymentRequest, UpdateGameServerDeploymentRolloutRequest, FetchDeploymentStateRequest, FetchDeploymentStateResponse, GameServerDeployment, GameServerConfigOverride, GameServerDeploymentRollout, 
PreviewGameServerDeploymentRolloutRequest, PreviewGameServerDeploymentRolloutResponse, ) from .realms import ( ListRealmsRequest, ListRealmsResponse, GetRealmRequest, CreateRealmRequest, DeleteRealmRequest, UpdateRealmRequest, PreviewRealmUpdateRequest, PreviewRealmUpdateResponse, Realm, ) __all__ = ( "OperationMetadata", "OperationStatus", "LabelSelector", "RealmSelector", "Schedule", "SpecSource", "TargetDetails", "TargetState", "DeployedFleetDetails", "ListGameServerClustersRequest", "ListGameServerClustersResponse", "GetGameServerClusterRequest", "CreateGameServerClusterRequest", "PreviewCreateGameServerClusterRequest", "PreviewCreateGameServerClusterResponse", "DeleteGameServerClusterRequest", "PreviewDeleteGameServerClusterRequest", "PreviewDeleteGameServerClusterResponse", "UpdateGameServerClusterRequest", "PreviewUpdateGameServerClusterRequest", "PreviewUpdateGameServerClusterResponse", "GameServerClusterConnectionInfo", "GkeClusterReference", "GameServerCluster", "ListGameServerConfigsRequest", "ListGameServerConfigsResponse", "GetGameServerConfigRequest", "CreateGameServerConfigRequest", "DeleteGameServerConfigRequest", "ScalingConfig", "FleetConfig", "GameServerConfig", "ListGameServerDeploymentsRequest", "ListGameServerDeploymentsResponse", "GetGameServerDeploymentRequest", "GetGameServerDeploymentRolloutRequest", "CreateGameServerDeploymentRequest", "DeleteGameServerDeploymentRequest", "UpdateGameServerDeploymentRequest", "UpdateGameServerDeploymentRolloutRequest", "FetchDeploymentStateRequest", "FetchDeploymentStateResponse", "GameServerDeployment", "GameServerConfigOverride", "GameServerDeploymentRollout", "PreviewGameServerDeploymentRolloutRequest", "PreviewGameServerDeploymentRolloutResponse", "ListRealmsRequest", "ListRealmsResponse", "GetRealmRequest", "CreateRealmRequest", "DeleteRealmRequest", "UpdateRealmRequest", "PreviewRealmUpdateRequest", "PreviewRealmUpdateResponse", "Realm", )
30.444444
74
0.774863
227
4,384
14.920705
0.581498
0.017715
0.0124
0.009448
0.832005
0.832005
0.832005
0.832005
0.725125
0.559787
0
0.002442
0.159215
4,384
143
75
30.657343
0.916441
0.12979
0
0
0
0
0.370887
0.285338
0
0
0
0
0
1
0
false
0
0.040323
0
0.040323
0
0
0
1
null
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
feb280673713551b2143c79082d21a56f422c03b
91
py
Python
test.py
mayboyxxx/wafw00f
da24468f930b72274cf618066cd6b6bb98e3e3b3
[ "BSD-3-Clause" ]
null
null
null
test.py
mayboyxxx/wafw00f
da24468f930b72274cf618066cd6b6bb98e3e3b3
[ "BSD-3-Clause" ]
null
null
null
test.py
mayboyxxx/wafw00f
da24468f930b72274cf618066cd6b6bb98e3e3b3
[ "BSD-3-Clause" ]
null
null
null
# -*- coding:utf-8 -*- #coding=utf-8 import sys reload(sys) sys.setdefaultencoding('utf8')
15.166667
30
0.692308
13
91
4.846154
0.615385
0.285714
0.31746
0
0
0
0
0
0
0
0
0.036585
0.098901
91
6
30
15.166667
0.731707
0.351648
0
0
0
0
0.070175
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
228cb2d42081d1ac5e199344124c6632d606e385
14,777
py
Python
tests/test_invoice.py
ComFreight/cmft-stripe-integration
85a2e14dcd6fffd24e999b1f383dd7eb006606e0
[ "MIT" ]
null
null
null
tests/test_invoice.py
ComFreight/cmft-stripe-integration
85a2e14dcd6fffd24e999b1f383dd7eb006606e0
[ "MIT" ]
null
null
null
tests/test_invoice.py
ComFreight/cmft-stripe-integration
85a2e14dcd6fffd24e999b1f383dd7eb006606e0
[ "MIT" ]
null
null
null
""" .. module:: dj-stripe.tests.test_invoice :synopsis: dj-stripe Invoice Model Tests. .. moduleauthor:: Alex Kavanaugh (@kavdev) .. moduleauthor:: Lee Skillen (@lskillen) """ from __future__ import absolute_import, division, print_function, unicode_literals from copy import deepcopy from django.conf import settings from django.contrib.auth import get_user_model from django.test.testcases import TestCase from mock import ANY, patch from stripe.error import InvalidRequestError from djstripe.models import Invoice, Plan, Subscription, UpcomingInvoice from . import ( FAKE_CHARGE, FAKE_CUSTOMER, FAKE_INVOICE, FAKE_INVOICEITEM_II, FAKE_PLAN, FAKE_SUBSCRIPTION, FAKE_UPCOMING_INVOICE, default_account ) class InvoiceTest(TestCase): def setUp(self): self.account = default_account() self.user = get_user_model().objects.create_user(username="pydanny", email="pydanny@gmail.com") self.customer = FAKE_CUSTOMER.create_for_user(self.user) @patch("djstripe.models.Account.get_default_account") @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION)) @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE)) def test_str(self, charge_retrieve_mock, subscription_retrive_mock, default_account_mock): default_account_mock.return_value = self.account invoice = Invoice.sync_from_stripe_data(deepcopy(FAKE_INVOICE)) self.assertEqual(invoice.get_stripe_dashboard_url(), self.customer.get_stripe_dashboard_url()) self.assertEqual(str(invoice), "Invoice #XXXXXXX-0001") @patch("stripe.Invoice.retrieve") @patch("djstripe.models.Account.get_default_account") @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION)) @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE)) def test_retry_true(self, charge_retrieve_mock, subscription_retrieve_mock, default_account_mock, invoice_retrieve_mock): default_account_mock.return_value = self.account fake_invoice = deepcopy(FAKE_INVOICE) fake_invoice.update({"paid": False, "closed": False}) 
invoice_retrieve_mock.return_value = fake_invoice invoice = Invoice.sync_from_stripe_data(fake_invoice) return_value = invoice.retry() invoice_retrieve_mock.assert_called_once_with(id=invoice.stripe_id, api_key=settings.STRIPE_SECRET_KEY, expand=None) self.assertTrue(return_value) @patch("stripe.Invoice.retrieve") @patch("djstripe.models.Account.get_default_account") @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION)) @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE)) def test_retry_false(self, charge_retrieve_mock, subscription_retrieve_mock, default_account_mock, invoice_retrieve_mock): default_account_mock.return_value = self.account fake_invoice = deepcopy(FAKE_INVOICE) invoice_retrieve_mock.return_value = fake_invoice invoice = Invoice.sync_from_stripe_data(fake_invoice) return_value = invoice.retry() self.assertFalse(invoice_retrieve_mock.called) self.assertFalse(return_value) @patch("djstripe.models.Account.get_default_account") @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION)) @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE)) def test_status_paid(self, charge_retrieve_mock, subscription_retrieve_mock, default_account_mock): default_account_mock.return_value = self.account invoice = Invoice.sync_from_stripe_data(deepcopy(FAKE_INVOICE)) self.assertEqual(Invoice.STATUS_PAID, invoice.status) @patch("djstripe.models.Account.get_default_account") @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION)) @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE)) def test_status_open(self, charge_retrieve_mock, subscription_retrieve_mock, default_account_mock): default_account_mock.return_value = self.account invoice_data = deepcopy(FAKE_INVOICE) invoice_data.update({"paid": False, "closed": False}) invoice = Invoice.sync_from_stripe_data(invoice_data) self.assertEqual(Invoice.STATUS_OPEN, invoice.status) 
@patch("djstripe.models.Account.get_default_account") @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION)) @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE)) def test_status_forgiven(self, charge_retrieve_mock, subscription_retrieve_mock, default_account_mock): default_account_mock.return_value = self.account invoice_data = deepcopy(FAKE_INVOICE) invoice_data.update({"paid": False, "closed": False, "forgiven": True}) invoice = Invoice.sync_from_stripe_data(invoice_data) self.assertEqual(Invoice.STATUS_FORGIVEN, invoice.status) @patch("djstripe.models.Account.get_default_account") @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION)) @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE)) def test_status_closed(self, charge_retrieve_mock, subscription_retrieve_mock, default_account_mock): default_account_mock.return_value = self.account invoice_data = deepcopy(FAKE_INVOICE) invoice_data.update({"paid": False}) invoice = Invoice.sync_from_stripe_data(invoice_data) self.assertEqual(Invoice.STATUS_CLOSED, invoice.status) @patch("djstripe.models.Account.get_default_account") @patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN)) @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION)) @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE)) def test_sync_no_subscription(self, charge_retrieve_mock, subscription_retrieve_mock, plan_retrieve_mock, default_account_mock): default_account_mock.return_value = self.account invoice_data = deepcopy(FAKE_INVOICE) invoice_data.update({"subscription": None}) invoice = Invoice.sync_from_stripe_data(invoice_data) self.assertEqual(None, invoice.subscription) charge_retrieve_mock.assert_called_once_with(api_key=ANY, expand=ANY, id=FAKE_CHARGE["id"]) plan_retrieve_mock.assert_called_once_with(api_key=ANY, expand=ANY, id=FAKE_PLAN["id"]) subscription_retrieve_mock.assert_not_called() 
@patch("djstripe.models.Account.get_default_account") @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION)) @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE)) def test_invoice_with_subscription_invoice_items(self, charge_retrieve_mock, subscription_retrieve_mock, default_account_mock): default_account_mock.return_value = self.account invoice_data = deepcopy(FAKE_INVOICE) invoice = Invoice.sync_from_stripe_data(invoice_data) items = invoice.invoiceitems.all() self.assertEqual(1, len(items)) item_id = "{invoice_id}-{subscription_id}".format(invoice_id=invoice.stripe_id, subscription_id=FAKE_SUBSCRIPTION["id"]) self.assertEqual(item_id, items[0].stripe_id) @patch("djstripe.models.Account.get_default_account") @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION)) @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE)) def test_invoice_with_no_invoice_items(self, charge_retrieve_mock, subscription_retrieve_mock, default_account_mock): default_account_mock.return_value = self.account invoice_data = deepcopy(FAKE_INVOICE) invoice_data["lines"] = [] invoice = Invoice.sync_from_stripe_data(invoice_data) self.assertIsNotNone(invoice.plan) # retrieved from invoice item self.assertEqual(FAKE_PLAN["id"], invoice.plan.stripe_id) @patch("djstripe.models.Account.get_default_account") @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION)) @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE)) def test_invoice_with_non_subscription_invoice_items(self, charge_retrieve_mock, subscription_retrieve_mock, default_account_mock): default_account_mock.return_value = self.account invoice_data = deepcopy(FAKE_INVOICE) invoice_data["lines"]["data"].append(deepcopy(FAKE_INVOICEITEM_II)) invoice_data["lines"]["total_count"] += 1 invoice = Invoice.sync_from_stripe_data(invoice_data) self.assertIsNotNone(invoice) self.assertEqual(2, len(invoice.invoiceitems.all())) 
@patch("djstripe.models.Account.get_default_account") @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION)) @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE)) def test_invoice_plan_from_invoice_items(self, charge_retrieve_mock, subscription_retrieve_mock, default_account_mock): default_account_mock.return_value = self.account invoice_data = deepcopy(FAKE_INVOICE) invoice = Invoice.sync_from_stripe_data(invoice_data) self.assertIsNotNone(invoice.plan) # retrieved from invoice item self.assertEqual(FAKE_PLAN["id"], invoice.plan.stripe_id) @patch("djstripe.models.Account.get_default_account") @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION)) @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE)) def test_invoice_plan_from_subscription(self, charge_retrieve_mock, subscription_retrieve_mock, default_account_mock): default_account_mock.return_value = self.account invoice_data = deepcopy(FAKE_INVOICE) invoice_data["lines"]["data"][0]["plan"] = None invoice = Invoice.sync_from_stripe_data(invoice_data) self.assertIsNotNone(invoice.plan) # retrieved from subscription self.assertEqual(FAKE_PLAN["id"], invoice.plan.stripe_id) @patch("djstripe.models.Account.get_default_account") @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION)) @patch("stripe.Charge.retrieve", return_value=deepcopy(FAKE_CHARGE)) def test_invoice_without_plan(self, charge_retrieve_mock, subscription_retrieve_mock, default_account_mock): default_account_mock.return_value = self.account invoice_data = deepcopy(FAKE_INVOICE) invoice_data["lines"]["data"][0]["plan"] = None invoice_data["subscription"] = None invoice = Invoice.sync_from_stripe_data(invoice_data) self.assertIsNone(invoice.plan) @patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN)) @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION)) @patch("stripe.Invoice.upcoming", 
return_value=deepcopy(FAKE_UPCOMING_INVOICE)) def test_upcoming_invoice(self, invoice_upcoming_mock, subscription_retrieve_mock, plan_retrieve_mock): invoice = UpcomingInvoice.upcoming() self.assertIsNotNone(invoice) self.assertIsNone(invoice.stripe_id) self.assertIsNone(invoice.save()) self.assertEqual(invoice.get_stripe_dashboard_url(), "") subscription_retrieve_mock.assert_called_once_with(api_key=ANY, expand=ANY, id=FAKE_SUBSCRIPTION["id"]) plan_retrieve_mock.assert_not_called() items = invoice.invoiceitems.all() self.assertEqual(1, len(items)) self.assertEqual(FAKE_SUBSCRIPTION["id"], items[0].stripe_id) # delete/update should do nothing self.assertEqual(invoice.invoiceitems.update(), 0) self.assertEqual(invoice.invoiceitems.delete(), 0) self.assertIsNotNone(invoice.plan) self.assertEqual(FAKE_PLAN["id"], invoice.plan.stripe_id) invoice._invoiceitems = [] items = invoice.invoiceitems.all() self.assertEqual(0, len(items)) self.assertIsNotNone(invoice.plan) @patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN)) @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION)) @patch("stripe.Invoice.upcoming", return_value=deepcopy(FAKE_UPCOMING_INVOICE)) def test_upcoming_invoice_with_subscription(self, invoice_upcoming_mock, subscription_retrieve_mock, plan_retrieve_mock): invoice = Invoice.upcoming(subscription=Subscription(stripe_id=FAKE_SUBSCRIPTION["id"])) self.assertIsNotNone(invoice) self.assertIsNone(invoice.stripe_id) self.assertIsNone(invoice.save()) subscription_retrieve_mock.assert_called_once_with(api_key=ANY, expand=ANY, id=FAKE_SUBSCRIPTION["id"]) plan_retrieve_mock.assert_not_called() self.assertIsNotNone(invoice.plan) self.assertEqual(FAKE_PLAN["id"], invoice.plan.stripe_id) @patch("stripe.Plan.retrieve", return_value=deepcopy(FAKE_PLAN)) @patch("stripe.Subscription.retrieve", return_value=deepcopy(FAKE_SUBSCRIPTION)) @patch("stripe.Invoice.upcoming", return_value=deepcopy(FAKE_UPCOMING_INVOICE)) def 
test_upcoming_invoice_with_subscription_plan(self, invoice_upcoming_mock, subscription_retrieve_mock, plan_retrieve_mock): invoice = Invoice.upcoming(subscription_plan=Plan(stripe_id=FAKE_PLAN["id"])) self.assertIsNotNone(invoice) self.assertIsNone(invoice.stripe_id) self.assertIsNone(invoice.save()) subscription_retrieve_mock.assert_called_once_with(api_key=ANY, expand=ANY, id=FAKE_SUBSCRIPTION["id"]) plan_retrieve_mock.assert_not_called() self.assertIsNotNone(invoice.plan) self.assertEqual(FAKE_PLAN["id"], invoice.plan.stripe_id) @patch("stripe.Invoice.upcoming", side_effect=InvalidRequestError("Nothing to invoice for customer", None)) def test_no_upcoming_invoices(self, invoice_upcoming_mock): invoice = Invoice.upcoming() self.assertIsNone(invoice) @patch("stripe.Invoice.upcoming", side_effect=InvalidRequestError("Some other error", None)) def test_upcoming_invoice_error(self, invoice_upcoming_mock): with self.assertRaises(InvalidRequestError): Invoice.upcoming()
49.754209
112
0.730527
1,704
14,777
6.004108
0.076878
0.06236
0.07057
0.085427
0.822305
0.806471
0.796696
0.780373
0.773531
0.763562
0
0.001222
0.169588
14,777
296
113
49.922297
0.832532
0.01949
0
0.642857
0
0
0.131312
0.108931
0
0
0
0
0.245536
1
0.089286
false
0
0.040179
0
0.133929
0.004464
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
22a71f13cb6c7fae2355cf93d41793e0012eec3e
11,141
py
Python
vegans/utils/loading.py
unit8co/vegans
46ceebe917c3a58fec7a87644c3945a148cfc7c9
[ "MIT" ]
459
2019-01-30T17:04:40.000Z
2022-03-23T08:28:24.000Z
vegans/utils/loading.py
unit8co/vegans
46ceebe917c3a58fec7a87644c3945a148cfc7c9
[ "MIT" ]
7
2019-01-31T03:58:31.000Z
2021-12-14T15:30:28.000Z
vegans/utils/loading.py
unit8co/vegans
46ceebe917c3a58fec7a87644c3945a148cfc7c9
[ "MIT" ]
34
2019-01-30T18:20:33.000Z
2022-01-25T06:24:26.000Z
import torchvision from vegans.utils.architectures.mnist import ( preprocess_mnist, load_mnist_generator, load_mnist_adversary, load_mnist_encoder, load_mnist_decoder, load_mnist_autoencoder ) from vegans.utils.architectures.example import ( load_example_generator, load_example_adversary, load_example_encoder, load_example_decoder, load_example_autoencoder ) from vegans.utils.architectures.celeba import ( preprocess_celeba ) from vegans.utils.architectures.cifar import ( preprocess_cifar ) def load_data(root, which=None, **kwargs): """ Wrapper around torchvision.datasets with certain preprocessing steps So far available are: - MNIST: Handwritten digits with labels. Can be downloaded via `download=True`. - FashionMNIST: Clothes with labels. Can be downloaded via `download=True`. - CelebA: Pictures of celebrities with attributes. Must be downloaded from https://www.kaggle.com/jessicali9530/celeba-dataset and moved into `root` folder. - CIFAR: Pictures of objects with labels. Must be downloaded from http://www.cs.toronto.edu/~kriz/cifar.html and moved into `root` folder. Parameters ---------- root : str Path to root directory. Is created if `download=True` and the folder does not exists yet. which : str, optional One of the torchvision.datasets. **kwargs Keyword arguments to torchvision.datasets (`https://pytorch.org/vision/0.8/datasets.html`). Returns ------- np.array Numpy array or torch dataset with train and test data. """ available = ["MNIST", "FashionMNIST", "CelebA", "CIFAR"] which = which.replace("mnist", "MNIST") if which.lower() == "mnist": loader = eval("torchvision.datasets." 
+ which) torch_data_train = loader(root=root, train=True, **kwargs) torch_data_test = loader(root=root, train=False, **kwargs) X_train, y_train = preprocess_mnist(torch_data_train, normalize=True, pad=2) X_test, y_test = preprocess_mnist(torch_data_test, normalize=True, pad=2) return X_train, y_train, X_test, y_test elif which.lower() == "fashionmnist": loader = eval("torchvision.datasets." + which) torch_data_train = loader(root=root, train=True, **kwargs) torch_data_test = loader(root=root, train=False, **kwargs) X_train, y_train = preprocess_mnist(torch_data_train, normalize=True, pad=2) X_test, y_test = preprocess_mnist(torch_data_test, normalize=True, pad=2) return X_train, y_train, X_test, y_test elif which.lower() == "celeba": train_dataloader = preprocess_celeba(root=root, **kwargs) return train_dataloader elif which.lower() == "cifar": X_train, y_train, X_test, y_test = preprocess_cifar(root=root, normalize=True, pad=0) return X_train, y_train, X_test, y_test else: raise ValueError("`which` must be one of {}.".format(available)) def load_generator(x_dim, z_dim, y_dim=None, which="example"): """ Load pre-defined (**NOT** pre-trained) architecture for a generator. Parameters ---------- x_dim : integer, list Indicating the number of dimensions for the real data. z_dim : integer, list Indicating the number of dimensions for the latent space. y_dim : None, optional Indicating the number of dimensions for the labels. which : str, optional Currently one of ["example", "mnist"]. Specifying "example" will provide you with a minimally working architecture for most use cases. However it's generic definition and underpowered structure will probably not result in desirable results. "mnist" provides you with a working architecture (depending of course on the choice of other hyper-parameters like optimizer) for both "mnist" datasets (MNIST and FashionMNIST). It might be useful for other problems where the input images are of the form (1, 32, 32) but it is not guaranteed. 
It's more powerful architecture might some take to train but should lead to reasonable results for certain use cases. Returns ------- nn.Module Generator architecture that can be passed to any GAN algorithm. """ available = ["example", "mnist"] if which == "example": return load_example_generator(x_dim, z_dim, y_dim=y_dim) elif which == "mnist": return load_mnist_generator(x_dim, z_dim, y_dim=y_dim) else: raise ValueError("`which` must be one of {}. Given: {}.".format(available, which)) def load_adversary(x_dim, z_dim, y_dim=None, adv_type="Critic", which="example"): """ Load pre-defined (**NOT** pre-trained) architecture for a adversary. Parameters ---------- x_dim : integer, list Indicating the number of dimensions for the real data. z_dim : integer, list Indicating the number of dimensions for the latent space. y_dim : None, optional Indicating the number of dimensions for the labels. which : str, optional Currently one of ["example", "mnist"]. Specifying "example" will provide you with a minimally working architecture for most use cases. However it's generic definition and underpowered structure will probably not result in desirable results. "mnist" provides you with a working architecture (depending of course on the choice of other hyper-parameters like optimizer) for both "mnist" datasets (MNIST and FashionMNIST). It might be useful for other problems where the input images are of the form (1, 32, 32) but it is not guaranteed. It's more powerful architecture might some take to train but should lead to reasonable results for certain use cases. Returns ------- nn.Module Adversary architecture that can be passed to any GAN algorithm. """ available = ["example", "mnist"] if which == "example": return load_example_adversary(x_dim, z_dim, y_dim=y_dim, adv_type=adv_type) elif which == "mnist": return load_mnist_adversary(x_dim, z_dim, y_dim=y_dim, adv_type=adv_type) else: raise ValueError("`which` must be one of {}. 
Given: {}.".format(available, which)) def load_encoder(x_dim, z_dim, y_dim=None, which="example"): """ Load pre-defined (**NOT** pre-trained) architecture for an encoder. Parameters ---------- x_dim : integer, list Indicating the number of dimensions for the real data. z_dim : integer, list Indicating the number of dimensions for the latent space. y_dim : None, optional Indicating the number of dimensions for the labels. which : str, optional Currently one of ["example", "mnist"]. Specifying "example" will provide you with a minimally working architecture for most use cases. However it's generic definition and underpowered structure will probably not result in desirable results. "mnist" provides you with a working architecture (depending of course on the choice of other hyper-parameters like optimizer) for both "mnist" datasets (MNIST and FashionMNIST). It might be useful for other problems where the input images are of the form (1, 32, 32) but it is not guaranteed. It's more powerful architecture might some take to train but should lead to reasonable results for certain use cases. Returns ------- nn.Module Encoder architecture that can be passed to certain GAN algorithms. """ available = ["example", "mnist"] if which == "example": return load_example_encoder(x_dim, z_dim, y_dim=y_dim) elif which == "mnist": return load_mnist_encoder(x_dim, z_dim, y_dim=y_dim) else: raise ValueError("`which` must be one of {}. Given: {}.".format(available, which)) def load_decoder(x_dim, z_dim, y_dim=None, which="example"): """ Load pre-defined (**NOT** pre-trained) architecture for a decoder. Parameters ---------- x_dim : integer, list Indicating the number of dimensions for the real data. z_dim : integer, list Indicating the number of dimensions for the latent space. y_dim : None, optional Indicating the number of dimensions for the labels. which : str, optional Currently one of ["example", "mnist"]. 
Specifying "example" will provide you with a minimally working architecture for most use cases. However it's generic definition and underpowered structure will probably not result in desirable results. "mnist" provides you with a working architecture (depending of course on the choice of other hyper-parameters like optimizer) for both "mnist" datasets (MNIST and FashionMNIST). It might be useful for other problems where the input images are of the form (1, 32, 32) but it is not guaranteed. It's more powerful architecture might some take to train but should lead to reasonable results for certain use cases. Returns ------- nn.Module Decoder architecture that can be passed to some GAN algorithms and VAEs. """ available = ["example", "mnist"] if which == "example": return load_example_decoder(x_dim, z_dim, y_dim=y_dim) elif which == "mnist": return load_mnist_decoder(x_dim, z_dim, y_dim=y_dim) else: raise ValueError("`which` must be one of {}. Given: {}.".format(available, which)) def load_autoencoder(x_dim, z_dim, y_dim=None, which="example"): """ Load pre-defined (**NOT** pre-trained) architecture for an auto-encoder. Parameters ---------- x_dim : integer, list Indicating the number of dimensions for the real data. z_dim : integer, list Indicating the number of dimensions for the latent space. y_dim : None, optional Indicating the number of dimensions for the labels. which : str, optional Currently one of ["example", "mnist"]. Specifying "example" will provide you with a minimally working architecture for most use cases. However it's generic definition and underpowered structure will probably not result in desirable results. "mnist" provides you with a working architecture (depending of course on the choice of other hyper-parameters like optimizer) for both "mnist" datasets (MNIST and FashionMNIST). It might be useful for other problems where the input images are of the form (1, 32, 32) but it is not guaranteed. 
It's more powerful architecture might some take to train but should lead to reasonable results for certain use cases. Returns ------- nn.Module Auto-encoder architecture that can be passed to for example the EBGAN. """ available = ["example", "mnist"] if which == "example": return load_example_autoencoder(x_dim, z_dim, y_dim=y_dim) elif which == "mnist": return load_mnist_autoencoder(x_dim, z_dim, y_dim=y_dim) else: raise ValueError("`which` must be one of {}. Given: {}.".format(available, which))
47.611111
134
0.687191
1,529
11,141
4.894702
0.128188
0.016034
0.023383
0.016034
0.826029
0.808391
0.800641
0.783137
0.764832
0.73116
0
0.004169
0.224935
11,141
233
135
47.815451
0.862536
0.596805
0
0.4875
0
0
0.121581
0.010638
0
0
0
0
0
1
0.075
false
0
0.0625
0
0.3125
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
22e11204bc8f0befcd2b1feed56ed266880c9b5f
17,437
py
Python
database/tests.py
gbriones1/django-skelleton
ee067594e3994f1bac5bf754f618d365bb5248d8
[ "BSD-3-Clause" ]
null
null
null
database/tests.py
gbriones1/django-skelleton
ee067594e3994f1bac5bf754f618d365bb5248d8
[ "BSD-3-Clause" ]
10
2020-06-05T16:38:25.000Z
2022-03-11T23:12:12.000Z
database/tests.py
gbriones1/django-skelleton
ee067594e3994f1bac5bf754f618d365bb5248d8
[ "BSD-3-Clause" ]
null
null
null
from django.test import TestCase from django.test.client import RequestFactory from database.serializers import * from database.models import * rf = RequestFactory() class ProductTestCase(TestCase): PRODUCTS_DATA = [ { "code": "0000", "name": "TEST", "providerName": "TEST", "brandName": "TEST", "applianceName": "", "price": 50.50, "discount": 0.0, }, { "code": "0001", "name": "TEST2", "providerName": "TEST2", "brandName": "TEST", "applianceName": "TEST2", "price": 100, "discount": 30 } ] def test_product_create(self): data = ProductTestCase.PRODUCTS_DATA[0] s = ProductSerializer(data=data) self.assertTrue(s.is_valid()) s.save() i = s.instance self.assertGreaterEqual(i.id, 1) self.assertGreaterEqual(i.brand.id, 1) self.assertGreaterEqual(i.provider.id, 1) data = ProductTestCase.PRODUCTS_DATA[1] s = ProductSerializer(data=data) self.assertTrue(s.is_valid()) s.save() i2 = s.instance self.assertEqual(i2.brand.id, i.brand.id) self.assertNotEqual(i2.provider.id, i.provider.id) def test_product_create_dup(self): data = ProductTestCase.PRODUCTS_DATA[0] s = ProductSerializer(data=data) s.is_valid() s.save() s = ProductSerializer(data=data) self.assertFalse(s.is_valid()) class InputTestCase(TestCase): def setUp(self): s = ProductSerializer(data=ProductTestCase.PRODUCTS_DATA, many=True) s.is_valid() s.save() self.products = s.instance s = OrganizationSerializer(data={"name":"Storage1"}) s.is_valid() s.save() self.organization = s.instance s = OrganizationStorageSerializer(data={ "organization":self.organization.id, "storage_type":"Stock" }) s.is_valid() s.save() self.organization_storage = s.instance def test_input_create(self): data = { "organization_storage": self.organization_storage.id, "invoice": "00000", "invoice_date": "2019-02-27", "movement_product_set": json.dumps([{ "product": self.products[0].id, "amount": 5, "price": 50.55, "discount": 50 }]) } s = InputSerializer(data=data) self.assertTrue(s.is_valid()) s.save() i = s.instance self.assertGreaterEqual(i.id, 1) p 
= i.movement_product_set.get(product=self.products[0].id).product self.assertEqual(float(p.discount), 50) self.assertEqual(float(p.price), 50.55) data2 = { "organization_storage": self.organization_storage.id, "invoice": "00001", "invoice_date": "2019-02-28", "movement_product_set": json.dumps([{ "product": self.products[0].id, "amount": 4, "price": 60, "discount": 30 }]) } s = InputSerializer(data=data2) self.assertTrue(s.is_valid()) s.save() i2 = s.instance self.assertGreaterEqual(i2.id, 2) p = i2.movement_product_set.get(product=self.products[0]).product self.assertEqual(float(p.discount), 30) self.assertEqual(float(p.price), 60) sp = Storage_Product.objects.get(organization_storage=self.organization_storage, product=self.products[0]) self.assertEqual(sp.amount, 9) self.assertEqual(i2.invoice.price, 168) data3 = { "organization_storage": self.organization_storage.id, "invoice": "00000", "invoice_date": "2019-02-27", "movement_product_set": json.dumps([{ "product": self.products[1].id, "amount": 10, "price": 120, "discount": 20 }]) } s = InputSerializer(data=data3) self.assertTrue(s.is_valid()) s.save() i3 = s.instance self.assertEqual(i3.invoice.id, i.invoice.id) self.assertEqual(float(i3.invoice.price), 1086.38) class OutputTestCase(TestCase): def setUp(self): s = ProductSerializer(data=ProductTestCase.PRODUCTS_DATA, many=True) s.is_valid() s.save() self.products = s.instance s = OrganizationSerializer(data={"name":"Storage1"}) s.is_valid() s.save() self.organization = s.instance s = OrganizationStorageSerializer(data={ "organization":self.organization.id, "storage_type_name":"Stock" }) s.is_valid() s.save() self.organization_storage = s.instance def test_output_create(self): data = { "organization_storage": self.organization_storage.id, "movement_product_set": json.dumps([{ "product": self.products[0].id, "amount": 7, "price": 50.55, }]) } s = OutputSerializer(data=data) self.assertTrue(s.is_valid()) s.save() i = s.instance sp = 
Storage_Product.objects.get(organization_storage=self.organization_storage, product=self.products[0]) self.assertEqual(sp.amount, 2) data = { "employee": self.employees[0].id, "destination": self.customers[0].id, "replacer": self.organizations[0].id, "organization_storage": self.organization_storage.id, "movement_product_set": [json.dumps([{ "product": self.products[0].id, "amount": 1, "price": 50.55, }])] } s = OutputSerializer(data=data) self.assertTrue(s.is_valid()) s.save() i = s.instance sp = Storage_Product.objects.get(organization_storage=self.organization_storage, product=self.products[0]) self.assertEqual(sp.amount, 1) self.assertEqual(i.employee.id, self.employees[0].id) self.assertEqual(i.destination.id, self.customers[0].id) self.assertEqual(i.replacer.id, self.organizations[0].id) def test_output_exeed(self): data = { "organization_storage": self.organization_storage.id, "movement_product_set": json.dumps([{ "product": self.products[0].id, "amount": 50000, "price": 50.55, }]) } s = OutputSerializer(data=data) self.assertTrue(s.is_valid()) s.save() i = s.instance sp = Storage_Product.objects.get(organization_storage=self.organization_storage, product=self.products[0]) self.assertEqual(sp.amount, 0) def test_unexistent(self): data = { "organization_storage": 90000, "movement_product_set": json.dumps([{ "product": self.products[0].id, "amount": 7, "price": 50.55, }]) } s = OutputSerializer(data=data) self.assertFalse(s.is_valid()) data = { "organization_storage": self.organization_storage.id, "movement_product_set": json.dumps([{ "product": 90000, "amount": 7, "price": 50.55, }]) } s = OutputSerializer(data=data) self.assertFalse(s.is_valid()) class EmployeeTestCase(TestCase): def setUp(self): data = { "name": "Employee" } s = EmployeeSerializer(data=data) self.assertTrue(s.is_valid()) s.save() self.assertEqual(s.instance.name, "Employee") self.employees = [s.instance] def test_edit(self): data = { "id": self.employees[0].id, "name": "new name" } s = 
EmployeeSerializer(self.employees[0], data=data) self.assertTrue(s.is_valid()) s.save() self.assertEqual(s.instance.name, "new name") self.assertEqual(s.instance.phone, self.employees[0].phone) self.assertEqual(s.instance.id, self.employees[0].id) data["phone"] = "1234567890" s = EmployeeSerializer(self.employees[0], data=data) self.assertTrue(s.is_valid()) s.save() self.assertEqual(s.instance.phone, "1234567890") self.assertEqual(s.instance.id, self.employees[0].id) class CustomerTestCase(TestCase): def setUp(self): data = { "name": "Customer" } s = CustomerSerializer(data=data) self.assertTrue(s.is_valid()) s.save() self.assertEqual(s.instance.name, "Customer") self.customers = [s.instance] def test_edit(self): data = { "id": self.customers[0].id, "name": "new name" } s = CustomerSerializer(self.customers[0], data=data) self.assertTrue(s.is_valid()) s.save() self.assertEqual(s.instance.name, "new name") self.assertEqual(s.instance.id, self.customers[0].id) self.assertEqual(len(s.instance.customer_contact_set.all()), 0) data["customer_contact_set"] = json.dumps([{ "name": "contact1" },{ "name": "contact2", "email": "contact@customer.com", "for_quotation": True, "for_invoice": False, }]) s = CustomerSerializer(self.customers[0], data=data) self.assertTrue(s.is_valid()) s.save() self.assertEqual(s.instance.id, self.customers[0].id) self.assertEqual(len(s.instance.customer_contact_set.all()), 2) self.assertEqual(s.instance.customer_contact_set.all()[0].name, "contact1") self.assertEqual(s.instance.customer_contact_set.all()[0].email, "") self.assertEqual(s.instance.customer_contact_set.all()[0].for_quotation, False) self.assertEqual(s.instance.customer_contact_set.all()[1].name, "contact2") self.assertEqual(s.instance.customer_contact_set.all()[1].email, "contact@customer.com") self.assertEqual(s.instance.customer_contact_set.all()[1].for_quotation, True) class ProviderTestCase(TestCase): def setUp(self): data = { "name": "Provider" } s = 
ProviderSerializer(data=data) self.assertTrue(s.is_valid()) s.save() self.assertEqual(s.instance.name, "Provider") self.providers = [s.instance] def test_edit(self): data = { "id": self.providers[0].id, "name": "new name" } s = ProviderSerializer(self.providers[0], data=data) self.assertTrue(s.is_valid()) s.save() self.assertEqual(s.instance.name, "new name") self.assertEqual(s.instance.id, self.providers[0].id) self.assertEqual(len(s.instance.provider_contact_set.all()), 0) data["provider_contact_set"] = json.dumps([{ "name": "contact1" },{ "name": "contact2", "email": "contact@provider.com", "for_orders": True }]) s = ProviderSerializer(self.providers[0], data=data) self.assertTrue(s.is_valid()) s.save() self.assertEqual(s.instance.id, self.providers[0].id) self.assertEqual(len(s.instance.provider_contact_set.all()), 2) self.assertEqual(s.instance.provider_contact_set.all()[0].name, "contact1") self.assertEqual(s.instance.provider_contact_set.all()[0].email, "") self.assertEqual(s.instance.provider_contact_set.all()[0].for_orders, False) self.assertEqual(s.instance.provider_contact_set.all()[1].name, "contact2") self.assertEqual(s.instance.provider_contact_set.all()[1].email, "contact@provider.com") self.assertEqual(s.instance.provider_contact_set.all()[1].for_orders, True) data["provider_contact_set"] = json.dumps([{ "name": "contact3" },{ "name": "contact2", "email": "contact2@provider.com" }]) s = ProviderSerializer(self.providers[0], data=data) self.assertTrue(s.is_valid()) s.save() self.assertEqual(s.instance.id, self.providers[0].id) self.assertEqual(len(s.instance.provider_contact_set.all()), 2) self.assertEqual(s.instance.provider_contact_set.all()[0].name, "contact3") self.assertEqual(s.instance.provider_contact_set.all()[0].email, "") self.assertEqual(s.instance.provider_contact_set.all()[0].for_orders, False) self.assertEqual(s.instance.provider_contact_set.all()[1].name, "contact2") self.assertEqual(s.instance.provider_contact_set.all()[1].email, 
"contact2@provider.com") self.assertEqual(s.instance.provider_contact_set.all()[1].for_orders, False) class OrganizationTestCase(TestCase): def setUp(self): data = { "name": "Organization" } s = OrganizationSerializer(data=data) self.assertTrue(s.is_valid()) s.save() self.assertEqual(s.instance.name, "Organization") self.organizations = [s.instance] def test_edit(self): data = { "id": self.organizations[0].id, "name": "new name" } s = OrganizationSerializer(self.organizations[0], data=data) self.assertTrue(s.is_valid()) s.save() self.assertEqual(s.instance.name, "new name") self.assertEqual(s.instance.id, self.organizations[0].id) class OrderTestCase(TestCase): def setUp(self): s = ProductSerializer(data=ProductTestCase.PRODUCTS_DATA, many=True) s.is_valid() s.save() self.products = s.instance s = OrganizationSerializer(data={"name":"Storage1"}) s.is_valid() s.save() self.organization = s.instance s = OrganizationStorageSerializer(data={ "organization":self.organization.id, "storage_type":"Stock" }) s.is_valid() s.save() self.organization_storage = s.instance data = { "name": "Provider" } s = ProviderSerializer(data=data) self.assertTrue(s.is_valid()) s.save() self.assertEqual(s.instance.name, "Provider") self.providers = [s.instance] def test_order_create(self): data = { "organization_storage": self.organization_storage.id, "provider": self.providers[0].id, "order_product_set": json.dumps([{ "product": self.products[0].id, "amount": 5 }]) } s = OrderSerializer(data=data) self.assertTrue(s.is_valid()) s.save() self.assertGreaterEqual(s.instance.id, 1) self.assertEqual(s.instance.status, Order.STATUS_PENDING) class InvoiceTestCase(TestCase): def setUp(self): data = { "name": "Provider" } s = ProviderSerializer(data=data) self.assertTrue(s.is_valid()) s.save() self.assertEqual(s.instance.name, "Provider") self.providers = [s.instance] data = { "date": "2018-10-05", "number": "0000", "provider": self.providers[0].id } s = InvoiceSerializer(data=data) 
self.assertTrue(s.is_valid()) s.save() self.assertGreaterEqual(s.instance.id, 1) self.assertEqual(s.instance.price, 0) data["price"] = 500 s = InvoiceSerializer(s.instance, data=data) self.assertTrue(s.is_valid()) s.save() self.assertEqual(s.instance.number, "0000") self.assertEqual(s.instance.price, 500) self.invoices = [s.instance] def test_add_payment(self): data = { "id": self.invoices[0].id, "payment_set": json.dumps([ { 'date': '2018-10-06', 'amount': 100 }, { 'date': '2018-10-07', 'amount': 50.5 } ]) } s = InvoiceSerializer(self.invoices[0], data=data) self.assertTrue(s.is_valid()) s.save() self.assertEqual(s.instance.price, 500) self.assertEqual(s.instance.paid, False) self.assertEqual(s.instance.id, self.invoices[0].id) data = { "id": self.invoices[0].id, "payment_set": json.dumps([ { 'date': '2018-10-06', 'amount': 100 }, { 'date': '2018-10-07', 'amount': 50.5 }, { 'date': '2018-10-08', 'amount': 349.5 } ]) } s = InvoiceSerializer(self.invoices[0], data=data) self.assertTrue(s.is_valid()) s.save() self.assertEqual(s.instance.price, 500) self.assertEqual(s.instance.paid, True) self.assertEqual(s.instance.id, self.invoices[0].id)
34.804391
114
0.558353
1,859
17,437
5.133943
0.083378
0.07544
0.080469
0.120704
0.80899
0.774099
0.74979
0.729254
0.716262
0.656643
0
0.029736
0.303779
17,437
501
115
34.804391
0.756425
0
0
0.610753
0
0
0.098119
0.002409
0
0
0
0
0.227957
1
0.043011
false
0
0.008602
0
0.073118
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
22ec9f51b4438959086a7bbe0ce25f58cede6d8b
27
py
Python
1.py
subline-collab/Bacchus
8ef34bdaac7c86bc63b1573ad64824eb1219680a
[ "Apache-2.0" ]
null
null
null
1.py
subline-collab/Bacchus
8ef34bdaac7c86bc63b1573ad64824eb1219680a
[ "Apache-2.0" ]
null
null
null
1.py
subline-collab/Bacchus
8ef34bdaac7c86bc63b1573ad64824eb1219680a
[ "Apache-2.0" ]
null
null
null
print('I can use github!')
13.5
26
0.666667
5
27
3.6
1
0
0
0
0
0
0
0
0
0
0
0
0.148148
27
1
27
27
0.782609
0
0
0
0
0
0.62963
0
0
0
0
0
0
1
0
true
0
0
0
0
1
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
1
0
0
0
0
1
0
6
22f47630ffa4977dcf140fb67703b870cf2ea61c
55
py
Python
src/data/__init__.py
qway/nerfmeshes
d983dcbbcfec1337c9f2040969213c6d1ea0c39e
[ "MIT" ]
113
2020-10-30T11:27:43.000Z
2022-03-28T04:28:36.000Z
mesh/src/data/__init__.py
ashwinpn/Computer-Vision
9dc3abfe416385171b76e2bad6872e10f36a12b4
[ "MIT" ]
11
2020-09-07T07:15:56.000Z
2022-02-26T19:21:00.000Z
mesh/src/data/__init__.py
ashwinpn/Computer-Vision
9dc3abfe416385171b76e2bad6872e10f36a12b4
[ "MIT" ]
17
2020-11-05T06:24:07.000Z
2022-03-18T21:30:35.000Z
from data.loaders import * from .data_helpers import *
18.333333
27
0.781818
8
55
5.25
0.625
0.380952
0
0
0
0
0
0
0
0
0
0
0.145455
55
2
28
27.5
0.893617
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
fe03313c52ed8c374fd58896fa2bcbda7d620436
160
py
Python
contracts/clear_program.py
dezzydez007/Algo-single-transaction-hackathon
3d37b4c9a4750fbbc2c604cb1980a234792c9425
[ "MIT" ]
1
2022-03-22T21:14:15.000Z
2022-03-22T21:14:15.000Z
contracts/clear_program.py
dezzydez007/Algo-single-transaction-hackathon
3d37b4c9a4750fbbc2c604cb1980a234792c9425
[ "MIT" ]
null
null
null
contracts/clear_program.py
dezzydez007/Algo-single-transaction-hackathon
3d37b4c9a4750fbbc2c604cb1980a234792c9425
[ "MIT" ]
null
null
null
from pyteal import * def clear_program(): return Int(1) if __name__ == "__main__": print(compileTeal(clear_program(), Mode.Application, version=6))
16
68
0.7
20
160
5.1
0.9
0.235294
0
0
0
0
0
0
0
0
0
0.015038
0.16875
160
9
69
17.777778
0.75188
0
0
0
0
0
0.05
0
0
0
0
0
0
1
0.2
true
0
0.2
0.2
0.6
0.2
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
1
1
0
0
6
fe185af40767e373cf9d84cccd16699dd1a841ef
179
py
Python
smsymer/evm/exception/invalidOperationException.py
Troublor/smSymer
05ec597325a72d9338306a7aba6cd07d4b4c6011
[ "MIT" ]
3
2019-06-02T15:30:47.000Z
2021-01-05T06:15:55.000Z
smsymer/evm/exception/invalidOperationException.py
Troublor/smSymer
05ec597325a72d9338306a7aba6cd07d4b4c6011
[ "MIT" ]
1
2021-06-12T17:03:33.000Z
2021-06-12T17:03:33.000Z
smsymer/evm/exception/invalidOperationException.py
Troublor/smSymer
05ec597325a72d9338306a7aba6cd07d4b4c6011
[ "MIT" ]
1
2020-12-04T01:51:29.000Z
2020-12-04T01:51:29.000Z
class InvalidOperationException(Exception): def __init__(self, byte): self.byte = byte def __str__(self): return "Invalid bytecode: {0}".format(self.byte)
29.833333
56
0.670391
20
179
5.6
0.65
0.214286
0
0
0
0
0
0
0
0
0
0.007092
0.212291
179
6
56
29.833333
0.787234
0
0
0
0
0
0.116667
0
0
0
0
0
0
1
0.4
false
0
0
0.2
0.8
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
6
fe1b1b396f7fad69eb68cd6d4051dc7df9511b76
111
py
Python
model/__init__.py
AdrienBenamira/k_coloring_graph_AlphaZeroGo
c8f3271a2b117c95616b5752e134114ee8b20294
[ "MIT" ]
1
2020-04-05T03:12:22.000Z
2020-04-05T03:12:22.000Z
model/__init__.py
AdrienBenamira/k_coloring_graph_AlphaZeroGo
c8f3271a2b117c95616b5752e134114ee8b20294
[ "MIT" ]
null
null
null
model/__init__.py
AdrienBenamira/k_coloring_graph_AlphaZeroGo
c8f3271a2b117c95616b5752e134114ee8b20294
[ "MIT" ]
null
null
null
from .Arena import * from .Coach import * from .MCTS import * from .NeuralNet import * from .GraphNNet import *
22.2
24
0.738739
15
111
5.466667
0.466667
0.487805
0
0
0
0
0
0
0
0
0
0
0.171171
111
5
25
22.2
0.891304
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
a3ade82f9e282f844770b8a610cfcbb17ce12b71
16,411
py
Python
api/resources_portal/test/views/test_material.py
AlexsLemonade/resources-portal
d91c6c8d6135461faccbc78ef2b0be3f9b358f21
[ "BSD-3-Clause" ]
null
null
null
api/resources_portal/test/views/test_material.py
AlexsLemonade/resources-portal
d91c6c8d6135461faccbc78ef2b0be3f9b358f21
[ "BSD-3-Clause" ]
536
2019-11-13T15:49:03.000Z
2022-03-28T20:17:24.000Z
api/resources_portal/test/views/test_material.py
AlexsLemonade/resources-portal
d91c6c8d6135461faccbc78ef2b0be3f9b358f21
[ "BSD-3-Clause" ]
1
2020-04-03T02:07:29.000Z
2020-04-03T02:07:29.000Z
from django.forms.models import model_to_dict from django.urls import reverse from rest_framework import status from rest_framework.test import APITestCase from faker import Faker from guardian.shortcuts import assign_perm from resources_portal.models import Material, MaterialShareEvent, Notification from resources_portal.test.factories import ( AttachmentFactory, MaterialFactory, MaterialRequestFactory, OrganizationFactory, ShippingRequirementFactory, UserFactory, ) fake = Faker() class TestMaterialListTestCase(APITestCase): """ Tests /materials list operations. """ def setUp(self): self.url = reverse("material-list") self.user = UserFactory() self.user_without_perms = UserFactory() self.organization = OrganizationFactory(owner=self.user) self.material = MaterialFactory( contact_user=self.user, organization=self.organization, category="PLASMID" ) self.material_data = model_to_dict(self.material) def test_post_request_with_no_data_fails(self): self.client.force_authenticate(user=self.user) response = self.client.post(self.url, {}) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_post_request_with_valid_data_succeeds(self): self.client.force_authenticate(user=self.user) response = self.client.post(self.url, self.material_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(self.material.category, response.json()["category"]) self.assertEqual( len(Notification.objects.filter(notification_type="MATERIAL_ADDED")), self.organization.members.count(), ) def test_import_other_with_valid_data_succeeds(self): self.client.force_authenticate(user=self.user) self.material_data["imported"] = True self.material_data["type"] = "OTHER" response = self.client.post(self.url, self.material_data, format="json") self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(self.material.category, response.json()["category"]) self.assertEqual( 
len(Notification.objects.filter(notification_type="MATERIAL_ADDED")), self.organization.members.count(), ) def test_post_request_without_permission_forbidden(self): self.client.force_authenticate(user=self.user_without_perms) response = self.client.post(self.url, self.material_data, format="json") self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_post_request_from_unauthenticated_forbidden(self): self.client.force_authenticate(user=None) response = self.client.post(self.url, self.material_data, format="json") self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_get_request_succeeds(self): self.client.force_authenticate(user=None) response = self.client.get(self.url) self.assertEqual(response.status_code, status.HTTP_200_OK) def test_get_request_limit_succeeds(self): for i in range(4): MaterialFactory(category="CELL_LINE") self.client.force_authenticate(user=None) response = self.client.get(self.url + "?limit=3") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.json()["results"]), 3) def test_get_request_filter_succeeds(self): for i in range(11): last_material = MaterialFactory(category="CELL_LINE") self.client.force_authenticate(user=None) response = self.client.get(self.url + "?category=CELL_LINE") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.json()["results"]), 10) response = self.client.get(self.url + "?category=PLASMID") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.json()["results"]), 1) last_org_id = last_material.organization.id response = self.client.get(self.url + f"?organization__id={last_org_id}") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.json()["results"]), 1) def test_import_previously_imported_geo_fails(self): self.client.force_authenticate(user=self.user) accession_code = "12345" material = MaterialFactory( imported=True, 
import_source="GEO", contact_user=self.user, organization=self.organization, additional_metadata={"accession_code": accession_code}, ) imported_data = model_to_dict(material) # Try to import the same material again response = self.client.post(self.url, imported_data, format="json") self.assertEqual(response.status_code, 400) self.assertEqual(response.json()["error_code"], "ALREADY_IMPORTED") self.assertEqual( response.json()["material"]["additional_metadata"]["accession_code"], accession_code ) def test_import_previously_imported_protocol_fails(self): self.client.force_authenticate(user=self.user) protocol_doi = "12345" response = self.client.post( self.url, {"import_source": "PROTOCOLS_IO", "protocol_doi": protocol_doi} ) material = MaterialFactory( imported=True, import_source="PROTOCOLS_IO", contact_user=self.user, organization=self.organization, additional_metadata={"protocol_doi": protocol_doi}, ) imported_data = model_to_dict(material) # Try to import the same material again response = self.client.post(self.url, imported_data, format="json") self.assertEqual(response.status_code, 400) self.assertEqual(response.json()["error_code"], "ALREADY_IMPORTED") self.assertEqual( response.json()["material"]["additional_metadata"]["protocol_doi"], protocol_doi ) def test_import_previously_imported_sra_fails(self): self.client.force_authenticate(user=self.user) accession_code = "12345" response = self.client.post( self.url, {"import_source": "SRA", "accession_code": accession_code} ) material = MaterialFactory( imported=True, import_source="SRA", contact_user=self.user, organization=self.organization, additional_metadata={"accession_code": accession_code}, ) imported_data = model_to_dict(material) # Try to import the same material again response = self.client.post(self.url, imported_data, format="json") self.assertEqual(response.status_code, 400) self.assertEqual(response.json()["error_code"], "ALREADY_IMPORTED") self.assertEqual( 
response.json()["material"]["additional_metadata"]["accession_code"], accession_code ) class TestSingleMaterialTestCase(APITestCase): """ Tests /materials detail operations. """ def setUp(self): self.user = UserFactory() self.user_without_perms = UserFactory() self.organization = OrganizationFactory(owner=self.user) self.organization2 = OrganizationFactory(owner=self.user) self.organization_without_perms = OrganizationFactory() self.material = MaterialFactory(contact_user=self.user, organization=self.organization) self.url = reverse("material-detail", args=[self.material.id]) assign_perm("delete_resources", self.user, self.organization) def test_get_request_returns_no_requests_if_no_user(self): self.client.force_authenticate(user=None) # Add a request to be filtered out. MaterialRequestFactory(material=self.material) response = self.client.get(self.url) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue("requests" not in response.json()) def test_get_request_returns_all_requests_if_user_in_org(self): self.client.force_authenticate(user=self.user) # Add a request to be filtered out. MaterialRequestFactory(material=self.material) response = self.client.get(self.url) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.json()["requests"]), 1) def test_get_request_filters_requests_if_user_not_in_org(self): requester = UserFactory() self.client.force_authenticate(user=requester) # Add a request to not be filtered out. MaterialRequestFactory(requester=requester, material=self.material) # Add a request to be filtered out. 
MaterialRequestFactory(material=self.material) response = self.client.get(self.url) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(len(response.json()["requests"]), 1) def test_put_request_updates_a_material(self): self.client.force_authenticate(user=self.user) material_json = self.client.get(self.url).json() new_url = fake.url() material_json["url"] = new_url material_json["contact_user"] = material_json["contact_user"]["id"] material_json["is_archived"] = True material_json["organization"] = material_json["organization"]["id"] response = self.client.put(self.url, material_json) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( len(Notification.objects.filter(notification_type="MATERIAL_ARCHIVED")), self.organization.members.count(), ) material = Material.objects.get(pk=self.material.id) self.assertEqual(material.url, new_url) def test_put_request_can_update_organization(self): self.client.force_authenticate(user=self.user) material_json = self.client.get(self.url).json() material_json["organization"] = self.organization2.id material_json["contact_user"] = material_json["contact_user"]["id"] response = self.client.put(self.url, material_json) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertTrue(self.material in self.organization2.materials.all()) def test_put_request_cannot_archive_with_active_requests(self): MaterialRequestFactory(material=self.material) self.client.force_authenticate(user=self.user) material_json = self.client.get(self.url).json() material_json["is_archived"] = True response = self.client.put(self.url, material_json) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_put_request_on_organization_without_permissions_for_both_orgs_fails(self): self.client.force_authenticate(user=self.user) material_json = self.client.get(self.url).json() material_json["organization"] = self.organization_without_perms.id material_json["contact_user"] = 
material_json["contact_user"]["id"] response = self.client.put(self.url, material_json) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_put_request_cannot_change_category(self): self.client.force_authenticate(user=self.user) material_json = self.client.get(self.url).json() material_json["category"] = "PLASMID" response = self.client.put(self.url, material_json) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_put_request_without_permission_forbidden(self): self.client.force_authenticate(user=self.user_without_perms) material_json = self.client.get(self.url).json() new_url = fake.url() material_json["url"] = new_url material_json["contact_user"] = material_json["contact_user"]["id"] response = self.client.put(self.url, material_json) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_put_request_from_unauthenticated_forbidden(self): self.client.force_authenticate(user=None) material_json = self.client.get(self.url).json() new_url = fake.url() material_json["url"] = new_url material_json["contact_user"] = material_json["contact_user"]["id"] response = self.client.put(self.url, material_json) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED) def test_patch_request_updates_a_material_irb(self): self.client.force_authenticate(user=self.user) material_json = {"needs_irb": True} response = self.client.patch(self.url, material_json) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( len(MaterialShareEvent.objects.filter(event_type="MATERIAL_IRB_REQUIREMENTS_CHANGED")), 1, ) def test_patch_request_updates_a_material_abstract(self): self.client.force_authenticate(user=self.user) material_json = {"needs_abstract": True} response = self.client.patch(self.url, material_json) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( len( MaterialShareEvent.objects.filter( event_type="MATERIAL_ABSTRACT_REQUIREMENTS_CHANGED" ) ), 1, ) def 
test_patch_request_updates_a_material_shipping(self): self.client.force_authenticate(user=self.user) shipping_requirement = ShippingRequirementFactory(organization=self.organization) material_json = {"shipping_requirement": shipping_requirement.id} response = self.client.patch(self.url, material_json) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( len( MaterialShareEvent.objects.filter( event_type="MATERIAL_SHIPPING_REQUIREMENTS_CHANGED" ) ), 1, ) def test_patch_request_updates_a_material_mta(self): self.client.force_authenticate(user=self.user) new_mta = AttachmentFactory(owned_by_user=self.user) material_json = {"mta_attachment": new_mta.id} response = self.client.patch(self.url, material_json) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( len(MaterialShareEvent.objects.filter(event_type="MATERIAL_MTA_REQUIREMENTS_CHANGED")), 1, ) def test_delete_request_deletes_a_material(self): self.client.force_authenticate(user=self.user) material_id = self.material.id response = self.client.delete(self.url) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) self.assertEqual(Material.objects.filter(id=material_id).count(), 0) self.assertEqual( len(Notification.objects.filter(notification_type="MATERIAL_DELETED")), self.organization.members.count(), ) def test_delete_material_with_request_fails(self): MaterialRequestFactory(material=self.material) self.client.force_authenticate(user=self.user) response = self.client.delete(self.url) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) def test_delete_request_without_permission_forbidden(self): self.client.force_authenticate(user=self.user_without_perms) response = self.client.delete(self.url) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_delete_request_from_unauthenticated_forbidden(self): self.client.force_authenticate(user=None) response = self.client.delete(self.url) self.assertEqual(response.status_code, 
status.HTTP_401_UNAUTHORIZED) def test_delete_only_soft_deletes_objects(self): self.client.force_authenticate(user=self.user) material_id = self.material.id response = self.client.delete(self.url) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) self.assertEqual(Material.deleted_objects.filter(id=material_id).count(), 1)
39.640097
99
0.694839
1,894
16,411
5.759768
0.095565
0.065084
0.080117
0.085067
0.815198
0.776331
0.743881
0.732973
0.721148
0.691264
0
0.010012
0.20273
16,411
413
100
39.736077
0.823754
0.019743
0
0.596721
0
0
0.067414
0.010779
0
0
0
0
0.193443
1
0.104918
false
0
0.098361
0
0.209836
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
a3b3cdf1e4740ddc05016d9609c33d8293b1a4f2
8,883
py
Python
tests/test_blackouts.py
richerve/alerta
6dd83aa09fa8b951611382c3250ef561f7ddc26d
[ "Apache-2.0" ]
1
2019-02-07T14:37:12.000Z
2019-02-07T14:37:12.000Z
tests/test_blackouts.py
francopeapea/alerta
dce18f0ab93a1feff518289d51261cccf151d3e8
[ "Apache-2.0" ]
null
null
null
tests/test_blackouts.py
francopeapea/alerta
dce18f0ab93a1feff518289d51261cccf151d3e8
[ "Apache-2.0" ]
null
null
null
import json import unittest from alerta.app import create_app, db, plugins from alerta.exceptions import BlackoutPeriod from alerta.models.key import ApiKey from alerta.plugins import PluginBase class BlackoutsTestCase(unittest.TestCase): def setUp(self): test_config = { 'TESTING': True, 'AUTH_REQUIRED': True, 'CUSTOMER_VIEWS': True, 'PLUGINS': ['reject'] } self.app = create_app(test_config) self.client = self.app.test_client() self.alert = { 'event': 'node_marginal', 'resource': 'node404', 'environment': 'Production', 'service': ['Network'], 'severity': 'warning', 'correlate': ['node_down', 'node_marginal', 'node_up'] } with self.app.test_request_context('/'): self.app.preprocess_request() self.admin_api_key = ApiKey( user='admin', scopes=['admin', 'read', 'write'], text='demo-key' ) self.customer_api_key = ApiKey( user='admin', scopes=['admin', 'read', 'write'], text='demo-key', customer='Foo' ) self.admin_api_key.create() self.customer_api_key.create() def tearDown(self): db.destroy() def test_suppress_blackout(self): plugins.plugins['blackout'] = SuppressionBlackout() self.headers = { 'Authorization': 'Key %s' % self.admin_api_key.key, 'Content-type': 'application/json' } # create alert response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers) self.assertEqual(response.status_code, 201) # create blackout response = self.client.post('/blackout', data=json.dumps({"environment": "Production"}), headers=self.headers) self.assertEqual(response.status_code, 201) data = json.loads(response.data.decode('utf-8')) blackout_id = data['id'] # suppress alert response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers) self.assertEqual(response.status_code, 202) self.headers = { 'Authorization': 'Key %s' % self.customer_api_key.key, 'Content-type': 'application/json' } # create alert response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers) self.assertEqual(response.status_code, 202) 
self.headers = { 'Authorization': 'Key %s' % self.admin_api_key.key, 'Content-type': 'application/json' } response = self.client.delete('/blackout/' + blackout_id, headers=self.headers) self.assertEqual(response.status_code, 200) def test_notification_blackout(self): plugins.plugins['blackout'] = NotificationBlackout() self.headers = { 'Authorization': 'Key %s' % self.admin_api_key.key, 'Content-type': 'application/json' } # create new blackout response = self.client.post('/blackout', data=json.dumps({"environment": "Production", "service": ["Network"]}), headers=self.headers) self.assertEqual(response.status_code, 201) data = json.loads(response.data.decode('utf-8')) blackout_id = data['id'] # new alert should be status=blackout response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers) self.assertEqual(response.status_code, 201) data = json.loads(response.data.decode('utf-8')) self.assertEqual(data['alert']['status'], 'blackout') # duplicate alert should be status=blackout response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers) self.assertEqual(response.status_code, 201) data = json.loads(response.data.decode('utf-8')) self.assertEqual(data['alert']['status'], 'blackout') # duplicate alert should be status=blackout (again) response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers) self.assertEqual(response.status_code, 201) data = json.loads(response.data.decode('utf-8')) self.assertEqual(data['alert']['status'], 'blackout') # increase severity alert should be status=blackout self.alert['severity'] = 'major' response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers) self.assertEqual(response.status_code, 201) data = json.loads(response.data.decode('utf-8')) self.assertEqual(data['alert']['status'], 'blackout') # increase severity alert should be status=blackout (again) self.alert['severity'] = 'critical' response = self.client.post('/alert', 
data=json.dumps(self.alert), headers=self.headers) self.assertEqual(response.status_code, 201) data = json.loads(response.data.decode('utf-8')) self.assertEqual(data['alert']['status'], 'blackout') # decrease severity alert should be status=blackout self.alert['severity'] = 'minor' response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers) self.assertEqual(response.status_code, 201) data = json.loads(response.data.decode('utf-8')) self.assertEqual(data['alert']['status'], 'blackout') # decrease severity alert should be status=blackout (again) self.alert['severity'] = 'warning' response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers) self.assertEqual(response.status_code, 201) data = json.loads(response.data.decode('utf-8')) self.assertEqual(data['alert']['status'], 'blackout') # normal severity alert should be status=closed self.alert['severity'] = 'ok' response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers) self.assertEqual(response.status_code, 201) data = json.loads(response.data.decode('utf-8')) self.assertEqual(data['alert']['status'], 'closed') # normal severity alert should be status=closed (again) response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers) self.assertEqual(response.status_code, 201) data = json.loads(response.data.decode('utf-8')) self.assertEqual(data['alert']['status'], 'closed') # non-normal severity alert should be status=blackout (again) self.alert['severity'] = 'major' response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers) self.assertEqual(response.status_code, 201) data = json.loads(response.data.decode('utf-8')) self.assertEqual(data['alert']['status'], 'blackout') # decrease severity alert should be status=blackout self.alert['severity'] = 'minor' response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers) self.assertEqual(response.status_code, 
201) data = json.loads(response.data.decode('utf-8')) self.assertEqual(data['alert']['status'], 'blackout') # remove blackout response = self.client.delete('/blackout/' + blackout_id, headers=self.headers) self.assertEqual(response.status_code, 200) # non-normal severity alert should be status=open self.alert['severity'] = 'minor' response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers) self.assertEqual(response.status_code, 201) data = json.loads(response.data.decode('utf-8')) self.assertEqual(data['alert']['status'], 'open') # normal severity alert should be status=closed self.alert['severity'] = 'ok' response = self.client.post('/alert', data=json.dumps(self.alert), headers=self.headers) self.assertEqual(response.status_code, 201) data = json.loads(response.data.decode('utf-8')) self.assertEqual(data['alert']['status'], 'closed') class SuppressionBlackout(PluginBase): def pre_receive(self, alert): if alert.is_blackout(): raise BlackoutPeriod("Suppressed alert during blackout period") return alert def post_receive(self, alert): return alert def status_change(self, alert, status, text): return class NotificationBlackout(PluginBase): def pre_receive(self, alert): if alert.is_blackout(): alert.status = 'blackout' return alert def post_receive(self, alert): return alert def status_change(self, alert, status, text): return
39.132159
142
0.62569
1,002
8,883
5.481038
0.113772
0.080117
0.06555
0.080117
0.838128
0.825747
0.825747
0.810452
0.810452
0.810452
0
0.01145
0.233142
8,883
226
143
39.30531
0.794774
0.083305
0
0.651899
0
0
0.135312
0
0
0
0
0
0.208861
1
0.063291
false
0
0.037975
0.025316
0.158228
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
a3c1b1a3c3f915fec60edb7723b8e7991fea8698
539
py
Python
src/sentry/tasks/__init__.py
Casecommons/sentry
b69a2373a658c5c775671fe9985c3fa4f2eafcfd
[ "BSD-3-Clause" ]
null
null
null
src/sentry/tasks/__init__.py
Casecommons/sentry
b69a2373a658c5c775671fe9985c3fa4f2eafcfd
[ "BSD-3-Clause" ]
null
null
null
src/sentry/tasks/__init__.py
Casecommons/sentry
b69a2373a658c5c775671fe9985c3fa4f2eafcfd
[ "BSD-3-Clause" ]
null
null
null
""" sentry.tasks ~~~~~~~~~~~~ :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import sentry.tasks.check_alerts # NOQA import sentry.tasks.check_update # NOQA import sentry.tasks.cleanup # NOQA import sentry.tasks.deletion # NOQA import sentry.tasks.email # NOQA import sentry.tasks.fetch_source # NOQA import sentry.tasks.index # NOQA import sentry.tasks.store # NOQA import sentry.tasks.post_process # NOQA import sentry.tasks.process_buffer # NOQA
28.368421
75
0.749536
77
539
5.181818
0.402597
0.303258
0.426065
0.473684
0
0
0
0
0
0
0
0.017241
0.139147
539
18
76
29.944444
0.842672
0.367347
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
a3e539df8ecc04f78a757ff6a642fbe8f528167b
98
py
Python
src/app/forms.py
Gabscurioshop/trentaudio
7ad62908c9e280f6142832e08ab62ade65a475e7
[ "MIT" ]
null
null
null
src/app/forms.py
Gabscurioshop/trentaudio
7ad62908c9e280f6142832e08ab62ade65a475e7
[ "MIT" ]
9
2021-05-08T03:24:16.000Z
2021-05-08T03:30:35.000Z
src/app/forms.py
Gabscurioshop/trentaudio
7ad62908c9e280f6142832e08ab62ade65a475e7
[ "MIT" ]
null
null
null
from wtforms import StringField from wtforms.validators import DataRequired #class SearchForm():
19.6
43
0.836735
11
98
7.454545
0.727273
0.268293
0
0
0
0
0
0
0
0
0
0
0.112245
98
4
44
24.5
0.942529
0.193878
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
433ff39c95c9317aadd97b372e157611b96d9314
73
py
Python
tests/unit/test_create_user.py
mwanjajoel/django-momoapi
b932117007a06e6c729e9ddb289fa615dbf583c5
[ "MIT" ]
10
2020-02-18T13:00:47.000Z
2021-12-09T16:21:25.000Z
tests/unit/test_create_user.py
mwanjajoel/django-momoapi
b932117007a06e6c729e9ddb289fa615dbf583c5
[ "MIT" ]
17
2020-02-18T12:56:22.000Z
2021-09-22T18:36:42.000Z
tests/unit/test_create_user.py
mwanjajoel/django-momoapi
b932117007a06e6c729e9ddb289fa615dbf583c5
[ "MIT" ]
6
2020-02-18T13:01:04.000Z
2022-02-22T19:42:56.000Z
import pytest # create a simple Pytest def test_user_create(): pass
14.6
25
0.739726
11
73
4.727273
0.818182
0
0
0
0
0
0
0
0
0
0
0
0.205479
73
5
26
14.6
0.896552
0.30137
0
0
0
0
0
0
0
0
0
0
0
1
0.333333
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
1
1
1
0
0
0
0
6
4a2d5f0302931bc1eb725aaa95c7182716d70485
14,615
py
Python
tests/api/app_test.py
bretttegart/treadmill
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
[ "Apache-2.0" ]
2
2017-10-31T18:48:20.000Z
2018-03-04T20:35:20.000Z
tests/api/app_test.py
bretttegart/treadmill
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
[ "Apache-2.0" ]
null
null
null
tests/api/app_test.py
bretttegart/treadmill
812109e31c503a6eddaee2d3f2e1faf2833b6aaf
[ "Apache-2.0" ]
null
null
null
"""Cell API tests. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import mock from jsonschema import exceptions as jexceptions from treadmill import admin from treadmill.api import app class ApiAppTest(unittest.TestCase): """treadmill.api.app tests.""" def setUp(self): self.app = app.API() def tearDown(self): pass @mock.patch('treadmill.context.AdminContext.conn') def test_list(self, admin_mock): """Test treadmill.api.app._list()""" apps = [ ( 'app=foo.app1,ou=apps,ou=treadmill,dc=ms,dc=com', { 'app': ['foo.app1'], 'memory': ['1G'], 'cpu': ['100%'], 'disk': ['1G'], } ), ( 'app=foo.app2,ou=apps,ou=treadmill,dc=ms,dc=com', { 'app': ['foo.app2'], 'memory': ['1G'], 'cpu': ['100%'], 'disk': ['1G'], } ), ( 'app=bar.app1,ou=apps,ou=treadmill,dc=ms,dc=com', { 'app': ['bar.app1'], 'memory': ['1G'], 'cpu': ['100%'], 'disk': ['1G'], } ), ] # paged_search returns generator admin_mock.paged_search.return_value = ( (dn, entry) for dn, entry in apps ) self.assertEqual( self.app.list(match='foo.*'), [ { '_id': 'foo.app1', 'affinity_limits': {}, 'args': [], 'cpu': '100%', 'disk': '1G', 'endpoints': [], 'environ': [], 'ephemeral_ports': {}, 'features': [], 'memory': '1G', 'passthrough': [], 'services': [], 'tickets': [], }, { '_id': 'foo.app2', 'affinity_limits': {}, 'args': [], 'cpu': '100%', 'disk': '1G', 'endpoints': [], 'environ': [], 'ephemeral_ports': {}, 'features': [], 'memory': '1G', 'passthrough': [], 'services': [], 'tickets': [], }, ] ) @mock.patch('treadmill.context.AdminContext.conn') def test_list_proid_filtering(self, admin_mock): """Test treadmill.api.app._list() proid filtering""" apps = [ ( 'app=foo.app1,ou=apps,ou=treadmill,dc=ms,dc=com', {'app': ['foo.app1']} ), ( 'app=foo.app2,ou=apps,ou=treadmill,dc=ms,dc=com', {'app': ['foo.app2']} ), ( 'app=bar.app1,ou=apps,ou=treadmill,dc=ms,dc=com', {'app': ['bar.app1']} ), ] 
admin_mock.paged_search.return_value = apps result = self.app.list() self.assertEqual( {item['_id'] for item in result}, {'foo.app1', 'foo.app2', 'bar.app1'} ) _args, kwargs = admin_mock.paged_search.call_args self.assertEqual( kwargs['search_filter'], '(objectClass=tmApp)' ) result = self.app.list(match='*') self.assertEqual( {item['_id'] for item in result}, {'foo.app1', 'foo.app2', 'bar.app1'} ) _args, kwargs = admin_mock.paged_search.call_args self.assertEqual( kwargs['search_filter'], '(objectClass=tmApp)' ) result = self.app.list(match='foo.*') self.assertEqual( {item['_id'] for item in result}, {'foo.app1', 'foo.app2'} ) _args, kwargs = admin_mock.paged_search.call_args self.assertEqual( kwargs['search_filter'], '(&(objectClass=tmApp)(app=foo.*))' ) result = self.app.list(match='foo.app?') self.assertEqual( {item['_id'] for item in result}, {'foo.app1', 'foo.app2'} ) _args, kwargs = admin_mock.paged_search.call_args self.assertEqual( kwargs['search_filter'], '(&(objectClass=tmApp)(app=foo.*))' ) result = self.app.list(match='foo?app*') self.assertEqual( {item['_id'] for item in result}, {'foo.app1', 'foo.app2'} ) _args, kwargs = admin_mock.paged_search.call_args self.assertEqual( kwargs['search_filter'], '(objectClass=tmApp)' ) @mock.patch('treadmill.context.AdminContext.conn', mock.Mock(return_value=admin.Admin(None, None))) @mock.patch('treadmill.admin.Application.get', mock.Mock(return_value={})) def test_get(self): """Dummy test for treadmill.api.cell.get()""" app_admin = admin.Application(None) self.app.get('proid.name') app_admin.get.assert_called_with('proid.name') @mock.patch('treadmill.context.AdminContext.conn', mock.Mock(return_value=admin.Admin(None, None))) @mock.patch('treadmill.admin.Application.get', mock.Mock(return_value={})) @mock.patch('treadmill.admin.Application.create', mock.Mock()) def test_create(self): """Dummy test for treadmill.api.cell.create(). 
""" app_admin = admin.Application(None) payload = { 'cpu': '100%', 'memory': '1G', 'disk': '1G', 'tickets': ['a@realm1', 'b@realm2'], 'features': [], 'services': [ { 'name': 'a', 'command': '/a', 'restart': { 'limit': 3, 'interval': 60, }, }, { 'name': 'b', 'command': '/b', }, ], 'endpoints': [ {'name': 'x', 'port': 1, 'type': 'infra'}, {'name': 'y', 'port': 2, 'type': 'infra'}, ], } self.app.create('proid.name', payload) app_admin.create.assert_called_with('proid.name', payload) def test_create_fail_null(self): """Test treadmill.api.cell.create() fails with null services. """ payload = { 'cpu': '100%', 'memory': '1G', 'disk': '1G', 'tickets': [u'a@realm1', u'b@realm2'], 'features': [], 'services': None, 'endpoints': [ {'name': 'x', 'port': 1, 'type': 'infra'}, {'name': 'y', 'port': 2, 'type': 'infra'}, ], } with self.assertRaises(jexceptions.ValidationError): self.app.create('proid.name', payload) def test_create_fail_empty(self): """Test treadmill.api.cell.create() fails with empty list services. """ payload = { 'cpu': '100%', 'memory': '1G', 'disk': '1G', 'tickets': [u'a@realm1', u'b@realm2'], 'features': [], 'services': [], 'endpoints': [ {'name': 'x', 'port': 1, 'type': 'infra'}, {'name': 'y', 'port': 2, 'type': 'infra'}, ], } with self.assertRaises(jexceptions.ValidationError): self.app.create('proid.name', payload) @mock.patch('treadmill.context.AdminContext.conn', mock.Mock(return_value=admin.Admin(None, None))) @mock.patch('treadmill.admin.Application.get', mock.Mock(return_value={})) @mock.patch('treadmill.admin.Application.create', mock.Mock()) def test_create_valid_affinity(self): """Test valid affinity name for treadmill.api.cell.create(). 
""" app_admin = admin.Application(None) payload = { 'cpu': '100%', 'memory': '1G', 'disk': '1G', 'features': [], 'affinity': 'foo.bar', 'services': [ { 'name': 'a', 'command': '/a', }, ], 'endpoints': [ {'name': 'x', 'port': 1, 'type': 'infra'}, ], } self.app.create('proid.name', payload) app_admin.create.assert_called_with('proid.name', payload) def test_create_invalid_affinity(self): """Test invalid affinity name for treadmill.api.cell.create(). """ payload = { 'cpu': '100%', 'memory': '1G', 'disk': '1G', 'features': [], 'affinity': '/foo.bar', 'services': [ { 'name': 'a', 'command': '/a', }, ], 'endpoints': [ {'name': 'x', 'port': 1, 'type': 'infra'}, ], } with self.assertRaises(jexceptions.ValidationError): self.app.create('proid.name', payload) @mock.patch('treadmill.context.AdminContext.conn', mock.Mock(return_value=admin.Admin(None, None))) @mock.patch('treadmill.admin.Application.get', mock.Mock(return_value={})) @mock.patch('treadmill.admin.Application.create', mock.Mock()) def test_create_docker(self): """Dummy test for treadmill.api.cell.create() for docker""" app_admin = admin.Application(None) payload = { 'cpu': '100%', 'memory': '1G', 'disk': '20G', 'image': 'docker://microsoft/windowsservercore', 'endpoints': [ {'name': 'x', 'port': 1, 'type': 'infra'}, {'name': 'y', 'port': 2, 'type': 'infra'}, ] } self.app.create('proid.name', payload) app_admin.create.assert_called_with('proid.name', payload) @mock.patch('treadmill.context.AdminContext.conn') def test_create_result(self, admin_mock): """Test response for treadmill.api.app.create(). 
""" admin_mock.dn.return_value = ( 'app=app.foo,ou=apps,ou=treadmill,dc=ms,dc=com' ) checksum = '6bf2b2db162e3043738a5c1d4e62bef5' entry = { 'app': ['foo.app'], 'cpu': ['100%'], 'disk': ['1G'], 'memory': ['1G'], 'service-name;tm-service-%s' % checksum: ['test_svc'], 'service-command;tm-service-%s' % checksum: ['test_cmd'], 'service-restart-limit;tm-service-%s' % checksum: ['5'], 'service-restart-interval;tm-service-%s' % checksum: ['60'], } admin_mock.get.return_value = entry result = self.app.create( 'foo.app', { 'cpu': '100%', 'disk': '1G', 'memory': '1G', 'services': [{'name': 'test_svc', 'command': 'test_cmd'}], } ) entry.update({'objectClass': ['tmApp']}) admin_mock.create.assert_called_once_with( 'app=app.foo,ou=apps,ou=treadmill,dc=ms,dc=com', entry ) self.assertEqual( result, { '_id': 'foo.app', 'cpu': '100%', 'disk': '1G', 'memory': '1G', 'services': [{ 'name': 'test_svc', 'command': 'test_cmd', 'restart': {'limit': 5, 'interval': 60}, }], 'args': [], 'endpoints': [], 'environ': [], 'features': [], 'passthrough': [], 'tickets': [], 'ephemeral_ports': {}, 'affinity_limits': {}, } ) @mock.patch('treadmill.context.AdminContext.conn') def test_update_result(self, admin_mock): """Test response for treadmill.api.app.update(). 
""" admin_mock.dn.return_value = ( 'app=app.foo,ou=apps,ou=treadmill,dc=ms,dc=com' ) checksum = '6bf2b2db162e3043738a5c1d4e62bef5' entry = { 'app': ['foo.app'], 'cpu': ['100%'], 'disk': ['1G'], 'memory': ['1G'], 'service-name;tm-service-%s' % checksum: ['test_svc'], 'service-command;tm-service-%s' % checksum: ['test_cmd'], 'service-restart-limit;tm-service-%s' % checksum: ['5'], 'service-restart-interval;tm-service-%s' % checksum: ['60'], } admin_mock.get.return_value = entry result = self.app.update( 'foo.app', { 'cpu': '100%', 'disk': '1G', 'memory': '1G', 'services': [{'name': 'test_svc', 'command': 'test_cmd'}], } ) admin_mock.delete.assert_called_once_with( 'app=app.foo,ou=apps,ou=treadmill,dc=ms,dc=com' ) entry.update({'objectClass': ['tmApp']}) admin_mock.create.assert_called_once_with( 'app=app.foo,ou=apps,ou=treadmill,dc=ms,dc=com', entry ) self.assertEqual( result, { '_id': 'foo.app', 'cpu': '100%', 'disk': '1G', 'memory': '1G', 'services': [{ 'name': 'test_svc', 'command': 'test_cmd', 'restart': {'limit': 5, 'interval': 60}, }], 'args': [], 'endpoints': [], 'environ': [], 'features': [], 'passthrough': [], 'tickets': [], 'ephemeral_ports': {}, 'affinity_limits': {}, } ) if __name__ == '__main__': unittest.main()
31.91048
75
0.435785
1,289
14,615
4.815361
0.106284
0.0261
0.043499
0.030127
0.874335
0.859352
0.84888
0.822781
0.780409
0.757048
0
0.02065
0.400274
14,615
457
76
31.980306
0.687507
0.046938
0
0.679293
0
0.027778
0.243255
0.103088
0
0
0
0
0.058081
1
0.032828
false
0.012626
0.022727
0
0.058081
0.002525
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
4a69f00aaa8e4da2eee34bb5b1865d3eb9c06900
57
py
Python
plugins/ListPlugins.py
thomas10-10/az
84fc6082083a1f8a3a2b26e1556f069ef539dd24
[ "MIT" ]
4
2021-06-17T21:54:22.000Z
2021-06-23T15:20:49.000Z
plugins/ListPlugins.py
starterTree/starterTree
bc13f5178915f6b561a450446768719509b09832
[ "MIT" ]
10
2021-11-29T22:44:27.000Z
2021-12-01T16:29:06.000Z
plugins/ListPlugins.py
starterTree/starterTree
bc13f5178915f6b561a450446768719509b09832
[ "MIT" ]
null
null
null
import plugins.ssh import plugins.www import plugins.cmd
14.25
18
0.842105
9
57
5.333333
0.555556
0.8125
0
0
0
0
0
0
0
0
0
0
0.105263
57
3
19
19
0.941176
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
4abaaf41cc57ed3af5ed955f97f841a672799c4e
233
py
Python
estimation/unidimensional/dichotomous/__init__.py
eribean/GIRTH
daf22773aa9cd1c819bf732e1061ebf5cc4dc40e
[ "MIT" ]
null
null
null
estimation/unidimensional/dichotomous/__init__.py
eribean/GIRTH
daf22773aa9cd1c819bf732e1061ebf5cc4dc40e
[ "MIT" ]
null
null
null
estimation/unidimensional/dichotomous/__init__.py
eribean/GIRTH
daf22773aa9cd1c819bf732e1061ebf5cc4dc40e
[ "MIT" ]
null
null
null
from .rasch_conditional import * from .rasch_mml import * from .twopl_mml import * from .twopl_mml_eap import * from .threepl_mml import * from .jml_methods import * from .ability_estimation import * from .ability_threepl import *
21.181818
33
0.785408
33
233
5.272727
0.363636
0.402299
0.224138
0.206897
0.224138
0
0
0
0
0
0
0
0.145923
233
11
34
21.181818
0.874372
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
435a6b8bb611c03e4a0fd36938ce38357c0ef419
30
py
Python
neuronit/back_end/__init__.py
neuronit/pfa
6483f23de3ac43ae1121760ab44a2cae1f2cc901
[ "MIT" ]
null
null
null
neuronit/back_end/__init__.py
neuronit/pfa
6483f23de3ac43ae1121760ab44a2cae1f2cc901
[ "MIT" ]
null
null
null
neuronit/back_end/__init__.py
neuronit/pfa
6483f23de3ac43ae1121760ab44a2cae1f2cc901
[ "MIT" ]
null
null
null
from multinetworking import *
15
29
0.833333
3
30
8.333333
1
0
0
0
0
0
0
0
0
0
0
0
0.133333
30
1
30
30
0.961538
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
6050ed5ad6b500eba6e6a5d1f1e466119998e8a4
135
py
Python
2.Python/test.py
sgeek28/Data-Science
ea0bfd6eeb78f534ab89fc9d4c306adb0087e07e
[ "MIT" ]
null
null
null
2.Python/test.py
sgeek28/Data-Science
ea0bfd6eeb78f534ab89fc9d4c306adb0087e07e
[ "MIT" ]
null
null
null
2.Python/test.py
sgeek28/Data-Science
ea0bfd6eeb78f534ab89fc9d4c306adb0087e07e
[ "MIT" ]
null
null
null
#import hello from hello import * #hello.sayHi("Sneha") #hello.sayHello("Sneha","Shukla") sayHi("Sneha") sayHello("Sneha","Shukla")
13.5
33
0.696296
17
135
5.529412
0.411765
0.234043
0.404255
0
0
0
0
0
0
0
0
0
0.096296
135
9
34
15
0.770492
0.474074
0
0
0
0
0.235294
0
0
0
0
0
0
1
0
true
0
0.333333
0
0.333333
0
1
0
0
null
1
1
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
0
0
0
6
605de176898ffc0de377d9b56ee96189755de084
47
py
Python
tests/_site/apps/catalogue/admin.py
izi-core/izi-core
21176be2d41f0cf54ca954f294209c585f643dba
[ "BSD-3-Clause" ]
null
null
null
tests/_site/apps/catalogue/admin.py
izi-core/izi-core
21176be2d41f0cf54ca954f294209c585f643dba
[ "BSD-3-Clause" ]
null
null
null
tests/_site/apps/catalogue/admin.py
izi-core/izi-core
21176be2d41f0cf54ca954f294209c585f643dba
[ "BSD-3-Clause" ]
null
null
null
from izi.apps.catalogue.admin import * # noqa
23.5
46
0.744681
7
47
5
1
0
0
0
0
0
0
0
0
0
0
0
0.148936
47
1
47
47
0.875
0.085106
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
60b8276d9181fe90fb2bbca9a730748b76736ecd
28
py
Python
webapp/frontend/__init__.py
aleksandergurin/news
9e7d3c35857600445cb6df42ba18d289dc0e37a9
[ "BSD-3-Clause" ]
3
2015-08-20T11:08:28.000Z
2018-01-28T21:22:53.000Z
webapp/frontend/__init__.py
aleksandergurin/news
9e7d3c35857600445cb6df42ba18d289dc0e37a9
[ "BSD-3-Clause" ]
null
null
null
webapp/frontend/__init__.py
aleksandergurin/news
9e7d3c35857600445cb6df42ba18d289dc0e37a9
[ "BSD-3-Clause" ]
null
null
null
from .views import frontend
14
27
0.821429
4
28
5.75
1
0
0
0
0
0
0
0
0
0
0
0
0.142857
28
2
27
14
0.958333
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
71837e60b4bb6bec3cebdd94540a6deff88b0174
213
py
Python
{{ cookiecutter.project_slug }}/{{ cookiecutter.project_module }}/settings/__init__.py
uisautomation/django-boilerplate
62a1162e38fba263021dc39d872cb071533d9cc1
[ "MIT" ]
null
null
null
{{ cookiecutter.project_slug }}/{{ cookiecutter.project_module }}/settings/__init__.py
uisautomation/django-boilerplate
62a1162e38fba263021dc39d872cb071533d9cc1
[ "MIT" ]
26
2018-01-18T11:20:34.000Z
2019-03-10T13:46:52.000Z
{{ cookiecutter.project_slug }}/{{ cookiecutter.project_module }}/settings/__init__.py
uisautomation/django-boilerplate
62a1162e38fba263021dc39d872cb071533d9cc1
[ "MIT" ]
2
2018-01-11T20:55:44.000Z
2019-03-11T16:51:23.000Z
""" The default :py:mod:`{{ cookiecutter.project_module }}.settings` module contains settings which are common to most deployment environments. """ # Import base settings from .base import * # noqa: F401, F403
23.666667
99
0.737089
27
213
5.777778
0.814815
0
0
0
0
0
0
0
0
0
0
0.033149
0.150235
213
8
100
26.625
0.828729
0.835681
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
71bd2b1d8fbecd02c92360684365c170d6bf74d0
128
py
Python
app/logic/benchmark/controllers/__init__.py
imvu/bluesteel
ab52133249a693b3cd2d8593c5d47408a3b0fce6
[ "MIT" ]
10
2017-01-13T06:28:04.000Z
2020-11-18T13:00:26.000Z
app/logic/benchmark/controllers/__init__.py
imvu/bluesteel
ab52133249a693b3cd2d8593c5d47408a3b0fce6
[ "MIT" ]
null
null
null
app/logic/benchmark/controllers/__init__.py
imvu/bluesteel
ab52133249a693b3cd2d8593c5d47408a3b0fce6
[ "MIT" ]
2
2018-03-29T14:10:53.000Z
2019-11-20T08:21:57.000Z
""" Automatic file """ from app.logic.benchmark.controllers.BenchmarkDefinitionController import BenchmarkDefinitionController
32
103
0.851563
10
128
10.9
0.9
0
0
0
0
0
0
0
0
0
0
0
0.070313
128
3
104
42.666667
0.915966
0.109375
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
e08569f935f61e251c4dcf11611004b323b95035
24
py
Python
rplugin/python3/konbu/__init__.py
miyanokomiya/konbu.nvim
e3d30e23eee371b8f0bcbf6003a438a2783f47d3
[ "MIT" ]
null
null
null
rplugin/python3/konbu/__init__.py
miyanokomiya/konbu.nvim
e3d30e23eee371b8f0bcbf6003a438a2783f47d3
[ "MIT" ]
null
null
null
rplugin/python3/konbu/__init__.py
miyanokomiya/konbu.nvim
e3d30e23eee371b8f0bcbf6003a438a2783f47d3
[ "MIT" ]
null
null
null
from .main import Konbu
12
23
0.791667
4
24
4.75
1
0
0
0
0
0
0
0
0
0
0
0
0.166667
24
1
24
24
0.95
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
e099b507594213b3826d2c0da5a237cd73dc685d
162
py
Python
gdc/gdc/doctype/teilnehmerin/test_teilnehmerin.py
motzmose/gdcvw
356cb094b70219ccda060c4c0ba9fcca842162ff
[ "MIT" ]
null
null
null
gdc/gdc/doctype/teilnehmerin/test_teilnehmerin.py
motzmose/gdcvw
356cb094b70219ccda060c4c0ba9fcca842162ff
[ "MIT" ]
null
null
null
gdc/gdc/doctype/teilnehmerin/test_teilnehmerin.py
motzmose/gdcvw
356cb094b70219ccda060c4c0ba9fcca842162ff
[ "MIT" ]
null
null
null
# Copyright (c) 2022, didaktik-aktuell e.V. and Contributors # See license.txt # import frappe import unittest class TestTeilnehmerin(unittest.TestCase): pass
18
60
0.777778
21
162
6
0.904762
0
0
0
0
0
0
0
0
0
0
0.028571
0.135802
162
8
61
20.25
0.871429
0.54321
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0.333333
0.333333
0
0.666667
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
1
1
0
1
0
0
6
e0ac39d0b51e9335aa7cda28d42d41d900ef7512
10,805
py
Python
repos/system_upgrade/el8toel9/actors/checkvdo/tests/unit_test_checkvdo.py
AsM0DeUz/leapp-repository
b67a395ee3d67d3d628037c250a210bb52e9187c
[ "Apache-2.0" ]
null
null
null
repos/system_upgrade/el8toel9/actors/checkvdo/tests/unit_test_checkvdo.py
AsM0DeUz/leapp-repository
b67a395ee3d67d3d628037c250a210bb52e9187c
[ "Apache-2.0" ]
null
null
null
repos/system_upgrade/el8toel9/actors/checkvdo/tests/unit_test_checkvdo.py
AsM0DeUz/leapp-repository
b67a395ee3d67d3d628037c250a210bb52e9187c
[ "Apache-2.0" ]
null
null
null
import functools from leapp import reporting from leapp.libraries.actor import checkvdo from leapp.libraries.common.testutils import create_report_mocked, CurrentActorMocked from leapp.libraries.stdlib import api from leapp.models import ( VdoConversionInfo, VdoConversionPostDevice, VdoConversionPreDevice, VdoConversionUndeterminedDevice ) from leapp.utils.report import is_inhibitor class MockedActorNoVdoDevices(CurrentActorMocked): def get_no_vdo_devices_response(self): return True class MockedActorSomeVdoDevices(CurrentActorMocked): def get_no_vdo_devices_response(self): return False def aslist(f): """ Decorator used to convert generator to list """ @functools.wraps(f) def inner(*args, **kwargs): return list(f(*args, **kwargs)) return inner @aslist def _post_conversion_vdos(count=0, complete=0, failing=0, start_char='a'): begin = complete for x in range(begin): yield VdoConversionPostDevice(name='sd{0}'.format(chr(ord(start_char) + x)), complete=True) for x in range(begin, begin + failing): yield VdoConversionPostDevice(name='sd{0}'.format(chr(ord(start_char) + x)), complete=False, check_failed=True, failure='unit testing') begin += failing for x in range(begin, count): yield VdoConversionPostDevice(name='sd{0}'.format(chr(ord(start_char) + x)), complete=False) @aslist def _pre_conversion_vdos(count=0, start_char='a'): for x in range(count): yield VdoConversionPreDevice(name='sd{0}'.format(chr(ord(start_char) + x))) @aslist def _undetermined_conversion_vdos(count=0, failing=False, start_char='a'): for x in range(count): yield VdoConversionUndeterminedDevice(name='sd{0}'.format(chr(ord(start_char) + x)), check_failed=failing, failure='unit testing' if failing else None) # No VDOs tests. 
def test_no_vdos(monkeypatch): monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) checkvdo.check_vdo( VdoConversionInfo(post_conversion=_post_conversion_vdos(), pre_conversion=_pre_conversion_vdos(), undetermined_conversion=_undetermined_conversion_vdos())) assert reporting.create_report.called == 0 # Concurrent pre- and post-conversion tests. def test_both_conversion_vdo_incomplete(monkeypatch): monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) post_count = 7 checkvdo.check_vdo( VdoConversionInfo( post_conversion=_post_conversion_vdos(post_count, 5), pre_conversion=_pre_conversion_vdos(3, start_char=chr(ord('a') + post_count)), undetermined_conversion=_undetermined_conversion_vdos())) assert reporting.create_report.called == 2 assert is_inhibitor(reporting.create_report.report_fields) # Post-conversion tests. def test_post_conversion_multiple_vdo_incomplete(monkeypatch): monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) checkvdo.check_vdo( VdoConversionInfo(post_conversion=_post_conversion_vdos(7, 5), pre_conversion=_pre_conversion_vdos(), undetermined_conversion=_undetermined_conversion_vdos())) assert reporting.create_report.called == 1 assert is_inhibitor(reporting.create_report.report_fields) assert reporting.create_report.report_fields['summary'].startswith('VDO devices') def test_post_conversion_multiple_vdo_complete(monkeypatch): monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) checkvdo.check_vdo( VdoConversionInfo(post_conversion=_post_conversion_vdos(7, 7), pre_conversion=_pre_conversion_vdos(), undetermined_conversion=_undetermined_conversion_vdos())) assert reporting.create_report.called == 0 def test_post_conversion_single_vdo_incomplete(monkeypatch): monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) checkvdo.check_vdo( VdoConversionInfo(post_conversion=_post_conversion_vdos(1), pre_conversion=_pre_conversion_vdos(), 
undetermined_conversion=_undetermined_conversion_vdos())) assert reporting.create_report.called == 1 assert is_inhibitor(reporting.create_report.report_fields) assert (reporting.create_report.report_fields['summary'].startswith('VDO device') and (not reporting.create_report.report_fields['summary'].startswith('VDO devices'))) def test_post_conversion_single_check_failing(monkeypatch): monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) checkvdo.check_vdo( VdoConversionInfo(post_conversion=_post_conversion_vdos(2, complete=1, failing=1), pre_conversion=_pre_conversion_vdos(), undetermined_conversion=_undetermined_conversion_vdos())) assert reporting.create_report.called == 1 assert is_inhibitor(reporting.create_report.report_fields) assert (reporting.create_report.report_fields['summary'].startswith( 'Unexpected result checking device') and (not reporting.create_report.report_fields['summary'].startswith( 'Unexpected result checking devices'))) def test_post_conversion_multiple_check_failing(monkeypatch): monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) checkvdo.check_vdo( VdoConversionInfo(post_conversion=_post_conversion_vdos(7, complete=4, failing=3), pre_conversion=_pre_conversion_vdos(), undetermined_conversion=_undetermined_conversion_vdos())) assert reporting.create_report.called == 1 assert is_inhibitor(reporting.create_report.report_fields) assert reporting.create_report.report_fields['summary'].startswith( 'Unexpected result checking devices') def test_post_conversion_incomplete_and_check_failing(monkeypatch): monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) checkvdo.check_vdo( VdoConversionInfo(post_conversion=_post_conversion_vdos(2, failing=1), pre_conversion=_pre_conversion_vdos(), undetermined_conversion=_undetermined_conversion_vdos())) assert reporting.create_report.called == 2 assert is_inhibitor(reporting.create_report.report_fields) # Pre-conversion tests. 
def test_pre_conversion_multiple_vdo_incomplete(monkeypatch): monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) checkvdo.check_vdo( VdoConversionInfo(post_conversion=_post_conversion_vdos(), pre_conversion=_pre_conversion_vdos(7), undetermined_conversion=_undetermined_conversion_vdos())) assert reporting.create_report.called == 1 assert is_inhibitor(reporting.create_report.report_fields) assert reporting.create_report.report_fields['summary'].startswith('VDO devices') def test_pre_conversion_single_vdo_incomplete(monkeypatch): monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) checkvdo.check_vdo( VdoConversionInfo(post_conversion=_post_conversion_vdos(), pre_conversion=_pre_conversion_vdos(1), undetermined_conversion=_undetermined_conversion_vdos())) assert reporting.create_report.called == 1 assert is_inhibitor(reporting.create_report.report_fields) assert (reporting.create_report.report_fields['summary'].startswith('VDO device') and (not reporting.create_report.report_fields['summary'].startswith('VDO devices'))) # Undetermined tests. 
def test_undetermined_single_check_failing(monkeypatch): monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) checkvdo.check_vdo( VdoConversionInfo(post_conversion=_post_conversion_vdos(), pre_conversion=_pre_conversion_vdos(), undetermined_conversion=_undetermined_conversion_vdos(1, True))) assert reporting.create_report.called == 1 assert is_inhibitor(reporting.create_report.report_fields) assert (reporting.create_report.report_fields['summary'].startswith( 'Unexpected result checking device') and (not reporting.create_report.report_fields['summary'].startswith( 'Unexpected result checking devices'))) def test_undetermined_multiple_check_failing(monkeypatch): monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) checkvdo.check_vdo( VdoConversionInfo(post_conversion=_post_conversion_vdos(), pre_conversion=_pre_conversion_vdos(), undetermined_conversion=_undetermined_conversion_vdos(3, failing=True))) assert reporting.create_report.called == 1 assert is_inhibitor(reporting.create_report.report_fields) assert reporting.create_report.report_fields['summary'].startswith( 'Unexpected result checking devices') def test_undetermined_multiple_no_check_no_vdos(monkeypatch): monkeypatch.setattr(api, 'current_actor', MockedActorNoVdoDevices()) monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) checkvdo.check_vdo( VdoConversionInfo(post_conversion=_post_conversion_vdos(), pre_conversion=_pre_conversion_vdos(), undetermined_conversion=_undetermined_conversion_vdos(3))) assert reporting.create_report.called == 1 assert not is_inhibitor(reporting.create_report.report_fields) assert reporting.create_report.report_fields['summary'].startswith( 'User has asserted there are no VDO devices') def test_undetermined_multiple_no_check_some_vdos(monkeypatch): monkeypatch.setattr(api, 'current_actor', MockedActorSomeVdoDevices()) monkeypatch.setattr(reporting, 'create_report', create_report_mocked()) checkvdo.check_vdo( 
VdoConversionInfo(post_conversion=_post_conversion_vdos(), pre_conversion=_pre_conversion_vdos(), undetermined_conversion=_undetermined_conversion_vdos(3))) assert reporting.create_report.called == 1 assert is_inhibitor(reporting.create_report.report_fields) assert reporting.create_report.report_fields['summary'].startswith( 'User has opted to inhibit upgrade')
46.175214
98
0.714114
1,140
10,805
6.413158
0.101754
0.113254
0.155109
0.09602
0.841472
0.826152
0.818766
0.79907
0.79907
0.758993
0
0.005322
0.200093
10,805
233
99
46.373391
0.840565
0.015548
0
0.61413
0
0
0.065876
0
0
0
0
0
0.201087
1
0.11413
false
0
0.038043
0.016304
0.184783
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
e0cf54e514dd3f90dea6296e1ff5f69b2295f783
39
py
Python
vnpy_rohon/gateway/__init__.py
Edanflame/vnpy_rohon
c7ca0033d1f9f0e8fab02b65279accde867a2225
[ "MIT" ]
2
2021-08-17T15:44:19.000Z
2021-11-11T09:03:58.000Z
vnpy_rohon/gateway/__init__.py
Edanflame/vnpy_rohon
c7ca0033d1f9f0e8fab02b65279accde867a2225
[ "MIT" ]
null
null
null
vnpy_rohon/gateway/__init__.py
Edanflame/vnpy_rohon
c7ca0033d1f9f0e8fab02b65279accde867a2225
[ "MIT" ]
3
2021-05-20T08:28:34.000Z
2022-02-08T06:37:24.000Z
from .rohon_gateway import RohonGateway
39
39
0.897436
5
39
6.8
1
0
0
0
0
0
0
0
0
0
0
0
0.076923
39
1
39
39
0.944444
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
e0ec3bfc577913e225b720ba06a6663aba50b414
43
py
Python
mtgdata/util/__init__.py
nmichlo/mtg-dataset
6503b835e4593f7198d45d565fdfae863bc98b45
[ "MIT" ]
null
null
null
mtgdata/util/__init__.py
nmichlo/mtg-dataset
6503b835e4593f7198d45d565fdfae863bc98b45
[ "MIT" ]
null
null
null
mtgdata/util/__init__.py
nmichlo/mtg-dataset
6503b835e4593f7198d45d565fdfae863bc98b45
[ "MIT" ]
null
null
null
from mtgdata.util.hdf5 import Hdf5Dataset
14.333333
41
0.837209
6
43
6
1
0
0
0
0
0
0
0
0
0
0
0.052632
0.116279
43
2
42
21.5
0.894737
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
e0f442e13c535c42a0f86c79c6103246fcdf05bc
36
py
Python
multiband_melgan/__init__.py
AppleHolic/multiband_melgan
e0864d0fc205c3bdf5e19c77753e105e29a2641b
[ "MIT" ]
41
2020-06-24T08:07:23.000Z
2022-01-24T16:39:54.000Z
multiband_melgan/__init__.py
AppleHolic/multiband_melgan
e0864d0fc205c3bdf5e19c77753e105e29a2641b
[ "MIT" ]
2
2020-06-24T08:02:15.000Z
2020-11-23T02:56:42.000Z
multiband_melgan/__init__.py
AppleHolic/multiband_melgan
e0864d0fc205c3bdf5e19c77753e105e29a2641b
[ "MIT" ]
5
2020-07-03T04:00:50.000Z
2020-11-04T03:24:48.000Z
from multiband_melgan import models
18
35
0.888889
5
36
6.2
1
0
0
0
0
0
0
0
0
0
0
0
0.111111
36
1
36
36
0.96875
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
460af2f6fdf53e83bfe842d7ca292f8aeb0c5e14
27,888
py
Python
arnold/experiment.py
davidbhr/arnold
2ea777f25f8b5e1cf68e0e41c86997a1554c2cbe
[ "MIT" ]
2
2019-06-25T12:44:55.000Z
2022-02-28T04:26:29.000Z
arnold/experiment.py
davidbhr/arnold
2ea777f25f8b5e1cf68e0e41c86997a1554c2cbe
[ "MIT" ]
null
null
null
arnold/experiment.py
davidbhr/arnold
2ea777f25f8b5e1cf68e0e41c86997a1554c2cbe
[ "MIT" ]
1
2020-06-22T12:35:59.000Z
2020-06-22T12:35:59.000Z
from __future__ import print_function, division from time import sleep import arnold as ar from subprocess import Popen import os import numpy as np import pandas as pd def mesh_and_show(mesh_file, d_cyl, l_cyl, r_outer, length_factor=0.4): """ Creates a spherical bulk mesh with a centered cylindrical inclusion and shows the mesh afterwards Args: mesh_file(str): File path to save mesh file (add .msh ending) d_cyl(float): Diameter of the cylindrical inclusion (in µm) l_cyl(float): Length of the cylindrical inclusion (in µm) r_outer(float): Outer radius of the bulk mesh (in µm) length_factor(float): Mesh element size is determined by curvature and then multipled with this factor """ ar.mesh.cylindrical_inclusion(mesh_file, d_cyl, l_cyl, r_outer, length_factor) ar.mesh.show_mesh(mesh_file) return def cylindrical_inclusion_mesh_and_simulation(mesh_file, d_cyl, l_cyl, r_outer, length_factor, simulation_folder, strain, material, logfile = False, iterations= 300 , step=0.3, conv_crit = 1e-11): """ Creates a spherical bulk mesh with a centered cylindrical inclusion and simulates a symetric contraction (with constant strain) of the cylindric inclusion Args: mesh_file(str): File path to save mesh file (add .msh ending) d_cyl(float): Diameter of the cylindrical inclusion (in µm) l_cyl(float): Length of the cylindrical inclusion (in µm) r_outer(float): Outer radius of the bulk mesh (in µm) length_factor(float): Mesh element size is determined by curvature and then multipled with this factor simulation_folder(str): File path to save simulation results strain(float): Strain as (Length_base - Length_contracted)/Length_base. Deformation is applied in x-direction and split equally to both poles, from which defomation-size decreases linearly to the center (symetric contraction with constant strain). 
Deformation can be determed as (Length_base - Length_contracted) material (dict): Material properties in the form {'K_0': X, 'D_0':X, 'L_S': X, 'D_S': X}, see materials) logfile(boolean): If True a reduced logfile of the saeno system output is stored. Default: False. iterations(float): The maximal number of iterations for the saeno simulation. Default: 300. step(float): Step width parameter for saeno regularization. Higher values lead to a faster but less robust convergence. Default: 0.3. conv_crit(float): Saeno stops if the relative standard deviation of the residuum is below given threshold. Default: 1e-11. """ ar.mesh.cylindrical_inclusion(mesh_file, d_cyl, l_cyl, r_outer, length_factor) # wait a bit longer until mesh is stored sleep(5) ar.simulation.cylindric_contraction(simulation_folder, mesh_file, d_cyl, l_cyl, r_outer, strain, material, logfile = logfile, iterations= iterations , step=step, conv_crit = conv_crit) return def cylindrical_inclusion_mesh_simulation_and_contractility(mesh_file, d_cyl, l_cyl, r_outer, length_factor, simulation_folder, strain, material, logfile = False, iterations= 300 , step=0.3, conv_crit = 1e-11, scalef = 1000, scaleu = 1, scaleb = 1): """ Creates a spherical bulk mesh with a centered cylindrical inclusion and simulates a symetric contraction (with constant strain) of the cylindric inclusion and computes the contractile forces. Args: mesh_file(str): File path to save mesh file (add .msh ending) d_cyl(float): Diameter of the cylindrical inclusion (in µm) l_cyl(float): Length of the cylindrical inclusion (in µm) r_outer(float): Outer radius of the bulk mesh (in µm) length_factor(float): Mesh element size is determined by curvature and then multipled with this factor simulation_folder(str): File path to save simulation results strain(float): Strain as (Length_base - Length_contracted)/Length_base. 
Deformation is applied in x-direction and split equally to both poles, from which defomation-size decreases linearly to the center (symetric contraction with constant strain). material (dict): Material properties in the form {'K_0': X, 'D_0':X, 'L_S': X, 'D_S': X}, see materials) logfile(boolean): If True a reduced logfile of the saeno system output is stored. Default: False. iterations(float): The maximal number of iterations for the saeno simulation. Default: 300. step(float): Step width parameter for saeno regularization. Higher values lead to a faster but less robust convergence. Default: 0.3. conv_crit(float): Saeno stops if the relative standard deviation of the residuum is below given threshold. Default: 1e-11. scalef ,scalu, scaleb: To scale the arrows for deformation , force and boundary cond. in quiver plot - only visually no impact on valeus """ #build mesh ar.mesh.cylindrical_inclusion(mesh_file, d_cyl, l_cyl, r_outer, length_factor) #wait a bit longer until mesh is stored sleep(5) #start saeno simulation ar.simulation.cylindric_contraction(simulation_folder, mesh_file, d_cyl, l_cyl, r_outer, strain, material, logfile = logfile, iterations= iterations , step=step, conv_crit = conv_crit) #wait a bit sleep(3) # compute individual contractilities ar.force.reconstruct_contractility(simulation_folder, d_cyl, l_cyl, r_outer, scalef = scalef, scaleu = scaleu, scaleb = scaleu) return def cylindrical_inclusion_simulation_and_contractility(mesh_file, d_cyl, l_cyl, r_outer, simulation_folder, strain, material, logfile = False, iterations= 300 , step=0.3, conv_crit = 1e-11): """ Simulates a symetric contraction (with constant strain) of the cylindric inclusion and computes the contractile forces. 
Args: mesh_file(str): File path to save mesh file (add .msh ending) d_cyl(float): Diameter of the cylindrical inclusion (in µm) l_cyl(float): Length of the cylindrical inclusion (in µm) r_outer(float): Outer radius of the bulk mesh (in µm) simulation_folder(str): File path to save simulation results strain(float): Strain as (Length_base - Length_contracted)/Length_base. Deformation is applied in x-direction and split equally to both poles, from which defomation-size decreases linearly to the center (symetric contraction with constant strain). material (dict): Material properties in the form {'K_0': X, 'D_0':X, 'L_S': X, 'D_S': X}, see materials) logfile(boolean): If True a reduced logfile of the saeno system output is stored. Default: False. iterations(float): The maximal number of iterations for the saeno simulation. Default: 300. step(float): Step width parameter for saeno regularization. Higher values lead to a faster but less robust convergence. Default: 0.3. conv_crit(float): Saeno stops if the relative standard deviation of the residuum is below given threshold. Default: 1e-11. 
""" ar.simulation.cylindric_contraction(simulation_folder, mesh_file, d_cyl, l_cyl, r_outer, strain, material, logfile = logfile, iterations= iterations , step=step, conv_crit = conv_crit) ar.force.reconstruct_contractility(simulation_folder, d_cyl, l_cyl, r_outer) return def simulation_series_lengths(d_cyl, l_cyl_min, l_cyl_max, n, r_outer, length_factor, simulation_folder, strain, material, log_scaling=True, n_cores=None, dec=10, logfile = True, iterations= 300 , step=0.3, conv_crit = 1e-11): """ Starts a series of simulation for different fiber lengths and evaluates fiber contractility Args: d_cyl(float): Diameter of the cylindrical inclusion (in µm) l_cyl_min(float): Minmal length of the cylindrical inclusion (in µm) l_cyl_max(float): Maximal length of the cylindrical inclusion (in µm) n(float): Number of simulates to be made between minimal and maximal length r_outer(float): Outer radius of the bulk mesh (in µm) length_factor(float): Mesh element size is determined by curvature and then multipled with this factor simulation_folder(str): File path to save simulation results strain(float): Strain as (Length_base - Length_contracted)/Length_base. Deformation is applied in x-direction and split equally to both poles, from which defomation-size decreases linearly to the center (symetric contraction with constant strain). log_scaling(boolean): Logarithmic or Linear scaling between cell lengths n_cores(float): Amount of simultanious processes to be started, default detects the amount of CPU cores dec(int): Decimal value to round the lengths, default is 10 material (dict): Material properties in the form {'K_0': X, 'D_0':X, 'L_S': X, 'D_S': X}, see materials) logfile(boolean): If True a reduced logfile of the saeno system output is stored. Default: True. iterations(float): The maximal number of iterations for the saeno simulation. Default: 300. step(float): Step width parameter for saeno regularization. Higher values lead to a faster but less robust convergence. 
Default: 0.3. conv_crit(float): Saeno stops if the relative standard deviation of the residuum is below given threshold. Default: 1e-11. """ # detect number of cores for paralell computing if n_cores is None: n_cores = os.cpu_count() print(str(n_cores)+' cores detected') # List of lengths in logarithmic steps to start simulation if log_scaling: L = np.logspace(np.log10(l_cyl_min), np.log10(l_cyl_max), num=n, endpoint=True) # List of lengths in linear steps to start simulation else: L = np.linspace(l_cyl_min, l_cyl_max, num=n, endpoint=True) # Limit decimal L = np.around(L, decimals=dec) print('Lengths: '+str(L)) L = list(L) # create output folder if it doesn't exist if not os.path.exists(simulation_folder): os.makedirs(simulation_folder) # Start single simulations in paralell processes = [] while True: if len(L) == 0: break if len(processes) < n_cores: command = '''python -c "import arnold as ar; ar.experiment.cylindrical_inclusion_mesh_simulation_and_contractility(r'{}',{},{},{},{},r'{}',{},{},{},{},{},{})"'''.format(simulation_folder+'\\'+str(L[0])+'.msh', d_cyl, L[0], r_outer, length_factor, simulation_folder+'\\'+str(L[0]), strain, material, logfile, iterations , step, conv_crit ) processes.append(Popen(command)) del L[0] sleep(1.) 
processes = [p for p in processes if p.poll() is None] def simulation_series_diameter(d_cyl_min, d_cyl_max, l_cyl, n, r_outer, length_factor, simulation_folder, strain, material, log_scaling=True, n_cores=None, dec=2 , logfile = True, iterations= 300 , step=0.3, conv_crit = 1e-11): """ Starts a series of simulation for different fiber diameters and evaluates the fiber contractility Args: d_cyl_min(float): Minimal diameter of the cylindrical inclusion (in µm) d_cyl_max(float): Maximal diameter of the cylindrical inclusion (in µm) l_cyl(float): Length of the cylindrical inclusion (in µm) n(float): Number of simulates to be made between minimal and maximal length r_outer(float): Outer radius of the bulk mesh (in µm) length_factor(float): Mesh element size is determined by curvature and then multipled with this factor simulation_folder(str): File path to save simulation results Strain(float): Strain applied on the muscle fiber (Length_base - Length_contracted/Length_base) # Total deformation of the cylindric inclusion (in µm). Deformation is applied in x-direction # and split equally to both poles, from which defomation-size decreases linearly to the center # (symetric contraction with constant strain). Positive values denote a contraction. # Deformation can be determed as (Length_base - Length_contracted) log_scaling(boolean): Logarithmic or Linear scaling between cell lengths n_cores(float): Amount of simultanious processes to be started, default detects the amount of CPU cores dec(int): Decimal value to round the lengths, default is 10 material (dict): Material properties in the form {'K_0': X, 'D_0':X, 'L_S': X, 'D_S': X}, see materials) logfile(boolean): If True a reduced logfile of the saeno system output is stored. Default: True. iterations(float): The maximal number of iterations for the saeno simulation. Default: 300. step(float): Step width parameter for saeno regularization. Higher values lead to a faster but less robust convergence. Default: 0.3. 
conv_crit(float): Saeno stops if the relative standard deviation of the residuum is below given threshold. Default: 1e-11. """ # detect number of cores for paralell computing if n_cores is None: n_cores = os.cpu_count() print(str(n_cores)+' cores detected') # List of lengths in logarithmic steps to start simulation if log_scaling: d = np.logspace(np.log10(d_cyl_min), np.log10(d_cyl_max), num=n, endpoint=True) # List of lengths in linear steps to start simulation else: d = np.linspace(d_cyl_min, d_cyl_max, num=n, endpoint=True) # Limit decimal d = np.around(d, decimals=dec) print('Diameters: '+str(d)) d = list(d) # create output folder if it doesn't exist if not os.path.exists(simulation_folder): os.makedirs(simulation_folder) # Start single simulations in paralell processes = [] while True: if len(d) == 0: break if len(processes) < n_cores: command = '''python -c "import arnold as ar; ar.experiment.cylindrical_inclusion_mesh_simulation_and_contractility(r'{}',{},{},{},{},r'{}',{},{},{},{},{},{})"'''.format(simulation_folder+'\\'+str(d[0])+'.msh', d[0], l_cyl, r_outer, length_factor, simulation_folder+'\\'+str(d[0]), strain, material, logfile, iterations , step, conv_crit ) processes.append(Popen(command)) del d[0] sleep(1.) 
processes = [p for p in processes if p.poll() is None] def simulation_series_strain(d_cyl, l_cyl, n, r_outer, length_factor, simulation_folder, strain_min, strain_max, material, log_scaling=True, n_cores=None, dec=2 , logfile = True, iterations= 300 , step=0.3, conv_crit = 1e-11): """ Starts a series of simulation for different cell lengths and evaluates cell contractility Args: d_cyl(float): Diameter of the cylindrical inclusion (in µm) l_cyl(float): Length of the cylindrical inclusion (in µm) n(float): Number of simulates to be made between minimal and maximal Diameter r_outer(float): Outer radius of the bulk mesh (in µm) length_factor(float): Mesh element size is determined by curvature and then multipled with this factor simulation_folder(str): File path to save simulation results strain_min(float): Minimal strain applied on the muscle fiber (Length_base - Length_contracted/Length_base) strain_max(float): Maximal strain applied on the muscle fiber (Length_base - Length_contracted/Length_base) # Total deformation of the cylindric inclusion (in µm). Deformation is applied in x-direction # and split equally to both poles, from which defomation-size decreases linearly to the center # (symetric contraction with constant strain). Positive values denote a contraction. # Deformation can be determed as (Length_base - Length_contracted) log_scaling(boolean): Logarithmic or Linear scaling between cell lengths n_cores(float): Amount of simultanious processes to be started, default detects the amount of CPU cores dec(int): Decimal value to round the lengths, default is 10 material (dict): Material properties in the form {'K_0': X, 'D_0':X, 'L_S': X, 'D_S': X}, see materials) logfile(boolean): If True a reduced logfile of the saeno system output is stored. Default: True. iterations(float): The maximal number of iterations for the saeno simulation. Default: 300. step(float): Step width parameter for saeno regularization. 
Higher values lead to a faster but less robust convergence. Default: 0.3. conv_crit(float): Saeno stops if the relative standard deviation of the residuum is below given threshold. Default: 1e-11. """ # detect number of cores for paralell computing if n_cores is None: n_cores = os.cpu_count() print(str(n_cores)+' cores detected') # List of lengths in logarithmic steps to start simulation if log_scaling: e = np.logspace(np.log10(strain_min), np.log10(strain_max), num=n, endpoint=True) # List of lengths in linear steps to start simulation else: e = np.linspace(strain_min, strain_max, num=n, endpoint=True) # Limit decimal e = np.around(e, decimals=dec) print('Strains: '+str(e)) # create output folder if it doesn't exist DOPPELT if not os.path.exists(simulation_folder): os.makedirs(simulation_folder) e = list(e) # Start single simulations in paralell processes = [] while True: if len(e) == 0: break if len(processes) < n_cores: command = '''python -c "import arnold as ar; ar.experiment.cylindrical_inclusion_mesh_simulation_and_contractility(r'{}',{},{},{},{},r'{}',{},{},{},{},{},{})"'''.format(simulation_folder+'\\'+str(e[0])+'.msh', d_cyl, l_cyl, r_outer, length_factor, simulation_folder+'\\'+str(e[0]), e[0], material, logfile, iterations , step, conv_crit ) processes.append(Popen(command)) del e[0] sleep(1.) 
processes = [p for p in processes if p.poll() is None] def simulation_series_stiffness(d_cyl, l_cyl, n, r_outer, length_factor, simulation_folder, strain, material_min, material_max, log_scaling=True, n_cores=None, dec=2 , logfile = True, iterations= 300 , step=0.3, conv_crit = 1e-11): """ Starts a series of simulation for different cell lengths and evaluates cell contractility Args: d_cyl(float): Diameter of the cylindrical inclusion (in µm) l_cyl(float): Length of the cylindrical inclusion (in µm) n(float): Number of simulates to be made between minimal and maximal Diameter r_outer(float): Outer radius of the bulk mesh (in µm) length_factor(float): Mesh element size is determined by curvature and then multipled with this factor simulation_folder(str): File path to save simulation results strain_(float): Strain applied on the muscle fiber (Length_base - Length_contracted/Length_base) # Total deformation of the cylindric inclusion (in µm). Deformation is applied in x-direction # and split equally to both poles, from which defomation-size decreases linearly to the center # (symetric contraction with constant strain). Positive values denote a contraction. # Deformation can be determed as (Length_base - Length_contracted) log_scaling(boolean): Logarithmic or Linear scaling between cell lengths n_cores(float): Amount of simultanious processes to be started, default detects the amount of CPU cores dec(int): Decimal value to round the lengths, default is 10 material_min (dict): Material properties in the form {'K_0': X, 'D_0':X, 'L_S': X, 'D_S': X}, see materials) ranging from min K_0 stiffness to max K_0 stiffness material_max (dict): Material properties in the form {'K_0': X, 'D_0':X, 'L_S': X, 'D_S': X}, see materials) ranging from min K_0 stiffness to max K_0 stiffness logfile(boolean): If True a reduced logfile of the saeno system output is stored. Default: True. iterations(float): The maximal number of iterations for the saeno simulation. Default: 300. 
step(float): Step width parameter for saeno regularization. Higher values lead to a faster but less robust convergence. Default: 0.3. conv_crit(float): Saeno stops if the relative standard deviation of the residuum is below given threshold. Default: 1e-11. """ # detect number of cores for paralell computing if n_cores is None: n_cores = os.cpu_count() print(str(n_cores)+' cores detected') # List of lengths in logarithmic steps to start simulation if log_scaling: k = np.logspace(np.log10(material_min['K_0']), np.log10(material_max['K_0']), num=n, endpoint=True) # List of lengths in linear steps to start simulation else: k = np.linspace(material_min['K_0'], material_max['K_0'], num=n, endpoint=True) # Limit decimal k = np.around(k, decimals=dec) print('Strains: '+str(k)) # create output folder if it doesn't exist if not os.path.exists(simulation_folder): os.makedirs(simulation_folder) k = list(k) # Start single simulations in paralell processes = [] while True: if len(k) == 0: break if len(processes) < n_cores: material_min['K_0'] = str(k[0]) command = '''python -c "import arnold as ar; ar.experiment.cylindrical_inclusion_mesh_simulation_and_contractility(r'{}',{},{},{},{},r'{}',{},{},{},{},{},{})"'''.format(simulation_folder+'\\'+str(k[0])+'.msh', d_cyl, l_cyl, r_outer, length_factor, simulation_folder+'\\'+str(k[0]), strain, material_min, logfile, iterations , step, conv_crit ) processes.append(Popen(command)) del k[0] sleep(1.) 
processes = [p for p in processes if p.poll() is None] def evaluate_series(path, comment=''): """ Evaluates several simulations and stores a overview in an excel file Args: path(str): File path to search subfolders for simulations comment(str): Comment which is added to the evaluation file containing all simulations of all found subfolders """ # initialize summary dictionary results = {'Diameter [µm]': [], 'Length [µm]': [], 'Strain [%]': [],'Contractility Absolute (mean) [N]': [], 'Contractility x-components (mean) [N]': [], 'Residuum Forces [N]': [], 'K_0': [], 'D_0': [],'L_S': [],'D_S': [], 'Deformation [µm]': [], 'Simulation Folder': []} print ('Files found:') for root, dirs, files in os.walk(path): for name in files: if name.endswith(("Contractilities.xlsx")): print (str(root)+"\\parameters.txt") print (str(root)+"\\Contractilities.xlsx") pd.set_option('display.max_colwidth', -1) # use full length strings in pandas Parameters = pd.read_csv(root+"\\parameters.txt", delimiter='=' , encoding = "ISO-8859-1", header=None) Parameters.columns = ['name','value'] # Append values from parameter.txt results['K_0'].append(float(Parameters[Parameters['name'].str.contains('K_0')]['value'])) # K0 value results['D_0'].append(float(Parameters[Parameters['name'].str.contains('D_0')]['value'])) results['L_S'].append(float(Parameters[Parameters['name'].str.contains('L_S')]['value'])) results['D_S'].append(float(Parameters[Parameters['name'].str.contains('D_S')]['value'])) results['Diameter [µm]'].append(float(str(Parameters[Parameters['name'].str.contains('d_cyl')]['value']).split()[1])) # one more split needed due to um string inside.. length = float(str(Parameters[Parameters['name'].str.contains('l_cyl')]['value']).split()[1]) results['Length [µm]'].append(length) deformation = float(str(Parameters[Parameters['name'].str.contains('deformation')]['value']).split()[1]) results['Deformation [µm]'].append(deformation) # one more split needed due to um string inside.. 
results['Strain [%]'].append((deformation/length)*100) #results['Simulation Folder'].append(str(Parameters[Parameters['name'].str.contains('Output_folder')]['value']).split()[1]) #simulation folder from paramteres results['Simulation Folder'].append(str(root)) # in case the name is manually changed after simulation # Append values from Contractilities.xlsx Contractilities = pd.read_excel(root+"\\Contractilities.xlsx") results['Contractility Absolute (mean) [N]'].append(float(Contractilities['Contractility Absolute (mean) [N]'])) results['Contractility x-components (mean) [N]'].append(float(Contractilities['Contractility x-components (mean) [N]'])) results['Residuum Forces [N]'].append(float(Contractilities['Residuum Forces [N]'])) df = pd.DataFrame.from_dict(results) df.columns = ['Diameter [µm]', 'Length [µm]', 'Strain [%]', 'Contractility Absolute (mean) [N]', 'Contractility x-components (mean) [N]', 'Residuum Forces [N]', 'K_0', 'D_0', 'L_S', 'D_S', 'Deformation [µm]', 'Simulation Folder'] df.to_excel(path+"\\series_evaluation_"+str(comment)+".xlsx") return df
52.322702
245
0.626004
3,573
27,888
4.767982
0.089561
0.013501
0.016025
0.026415
0.862879
0.848439
0.835583
0.819735
0.798251
0.788448
0
0.011154
0.283097
27,888
532
246
52.421053
0.840944
0.516997
0
0.444444
0
0
0.125069
0.039089
0
0
0
0
0
1
0.05
false
0
0.061111
0
0.138889
0.066667
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
460b712452670a1f77501add9c9d6d1b63a66caf
1,975
py
Python
firsttest/models/plotresults.py
charlos1204/firsttest
2c66385ae7149d1403071c2bf6da997873350556
[ "MIT" ]
null
null
null
firsttest/models/plotresults.py
charlos1204/firsttest
2c66385ae7149d1403071c2bf6da997873350556
[ "MIT" ]
null
null
null
firsttest/models/plotresults.py
charlos1204/firsttest
2c66385ae7149d1403071c2bf6da997873350556
[ "MIT" ]
null
null
null
import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt """ Author: Carlos Garcia-Perez Date: 25.06.2019 add support for save images in hosts without Display support 18.06.2019 first version of the script """ def plot_acc_loss(history, opt): if opt == 'plot': loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(loss) + 1) acc = history.history['acc'] val_acc = history.history['val_acc'] plt.figure(1) plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.figure(2) plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.show() elif opt == 'save': loss = history.history['loss'] val_loss = history.history['val_loss'] epochs = range(1, len(loss) + 1) acc = history.history['acc'] val_acc = history.history['val_acc'] plt.figure(1) plt.plot(epochs, loss, 'bo', label='Training loss') plt.plot(epochs, val_loss, 'b', label='Validation loss') plt.title('Training and validation loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.savefig("/data/training_validation_loss.png") plt.figure(2) plt.plot(epochs, acc, 'bo', label='Training acc') plt.plot(epochs, val_acc, 'b', label='Validation acc') plt.title('Training and validation accuracy') plt.xlabel('Epochs') plt.ylabel('Loss') plt.legend() plt.savefig("/data/training_validation_accuracy.png")
30.384615
77
0.598987
252
1,975
4.623016
0.238095
0.060086
0.08927
0.054936
0.792275
0.792275
0.792275
0.792275
0.792275
0.792275
0
0.016338
0.256203
1,975
65
78
30.384615
0.776719
0
0
0.808511
0
0
0.223872
0.039604
0
0
0
0
0
1
0.021277
false
0
0.042553
0
0.06383
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
1ca23355ff0fac7f94a85cada9e2e92793892062
4,436
py
Python
tests/web/admin/test_admin_receipts_controller.py
zakharovadaria/receipts
a1ac0936d8df4191c5500b605341e6380276595d
[ "MIT" ]
2
2020-02-01T17:42:23.000Z
2020-02-01T18:09:00.000Z
tests/web/admin/test_admin_receipts_controller.py
zakharovadaria/receipts
a1ac0936d8df4191c5500b605341e6380276595d
[ "MIT" ]
null
null
null
tests/web/admin/test_admin_receipts_controller.py
zakharovadaria/receipts
a1ac0936d8df4191c5500b605341e6380276595d
[ "MIT" ]
null
null
null
from flask.testing import FlaskClient from app.models.receipt import Receipt from db import session from schemas import ReceiptClientSchema from tests.test_receipts import prepare_create_receipt def test_status_without_auth(test_client: FlaskClient): response = test_client.get('/api/admin/v1/receipts/') actual = response.status_code expected = 401 assert actual == expected def test_status(test_client: FlaskClient, admin_headers: dict): response = test_client.get('/api/admin/v1/receipts/', headers=admin_headers) actual = response.status_code expected = 200 assert actual == expected def test_count(test_client: FlaskClient, admin_headers: dict): first_ingredient, second_ingredient, first_step, second_step = prepare_create_receipt() receipt = Receipt( name='Name', description='Cool', calories=200, ingredients=[first_ingredient, second_ingredient], steps=[first_step, second_step], ) session.add(receipt) session.commit() response = test_client.get('/api/admin/v1/receipts/', headers=admin_headers) actual = len(response.json['result']) expected = 1 assert actual == expected def test_get(test_client: FlaskClient, admin_headers: dict): first_ingredient, second_ingredient, first_step, second_step = prepare_create_receipt() receipt = Receipt( name='Name', description='Cool', calories=200, ingredients=[first_ingredient, second_ingredient], steps=[first_step, second_step], ) session.add(receipt) session.commit() response = test_client.get('/api/admin/v1/receipts/', headers=admin_headers) actual = response.json['result'][0] expected = ReceiptClientSchema().dump(receipt) assert actual == expected def test_show(test_client: FlaskClient, admin_headers: dict): first_ingredient, second_ingredient, first_step, second_step = prepare_create_receipt() receipt = Receipt( name='Name', description='Cool', calories=200, ingredients=[first_ingredient, second_ingredient], steps=[first_step, second_step], ) session.add(receipt) session.commit() response = 
test_client.get(f"/api/admin/v1/receipts/{receipt.id}/", headers=admin_headers) actual = response.json['result'] expected = ReceiptClientSchema().dump(receipt) assert actual == expected def test_create(test_client: FlaskClient, admin_headers: dict): first_ingredient, second_ingredient, first_step, second_step = prepare_create_receipt() data = ({ "name": 'Name', "description": 'Cool', "calories": 200, "ingredients": [first_ingredient.id, second_ingredient.id], "steps": [first_step, second_step], }) response = test_client.post('/api/admin/v1/receipts/', json=data, headers=admin_headers) actual = response.json['result'] expected = ReceiptClientSchema().dump(session.query(Receipt).one()) assert actual == expected def test_update(test_client: FlaskClient, admin_headers: dict): first_ingredient, second_ingredient, first_step, second_step = prepare_create_receipt() receipt = Receipt( name='Name', description='Cool', calories=200, ingredients=[first_ingredient, second_ingredient], steps=[first_step, second_step], ) session.add(receipt) session.commit() data = ({ "name": 'Name1', "description": 'Cool1', "calories": 201, "ingredients": [second_ingredient.id], "steps": [first_step], }) response = test_client.put(f"/api/admin/v1/receipts/{receipt.id}/", json=data, headers=admin_headers) actual = response.json['result'] expected = ReceiptClientSchema().dump(receipt) assert actual == expected def test_delete(test_client: FlaskClient, admin_headers: dict): first_ingredient, second_ingredient, first_step, second_step = prepare_create_receipt() receipt = Receipt( name='Name', description='Cool', calories=200, ingredients=[first_ingredient, second_ingredient], steps=[first_step, second_step], ) session.add(receipt) session.commit() test_client.delete(f"/api/admin/v1/receipts/{receipt.id}/", headers=admin_headers) actual = session.query(Receipt).count() expected = 0 assert actual == expected
28.805195
105
0.687782
495
4,436
5.947475
0.135354
0.054348
0.061141
0.077446
0.831861
0.774796
0.740489
0.727582
0.714334
0.671875
0
0.01122
0.196348
4,436
153
106
28.993464
0.814586
0
0
0.627273
0
0
0.087692
0.050271
0
0
0
0
0.072727
1
0.072727
false
0
0.045455
0
0.118182
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6